-
module ApplicationCable
  # ActionCable connection: identifies each websocket connection by the user
  # looked up from the signed :session_id cookie, rejecting anonymous clients.
  class Connection < ActionCable::Connection::Base
    identified_by :current_user

    # Called once when the websocket handshake completes; rejects the
    # connection when no authenticated user can be resolved.
    def connect
      set_current_user || reject_unauthorized_connection
    end

    private

    # Resolves the Session record referenced by the signed :session_id cookie
    # and assigns its user as the connection identity. Returns the user
    # (truthy) when a session is found, nil otherwise.
    def set_current_user
      session = Session.find_by(id: cookies.signed[:session_id])
      self.current_user = session.user if session
    end
  end
end
# Realtime brand-compliance channel. Clients subscribe with a :brand_id (and
# optionally a :session_id) and can then request compliance checks, aspect
# validations, fix previews and suggestions; results are transmitted back on
# this channel or broadcast to the brand/session streams.
class BrandComplianceChannel < ApplicationCable::Channel
  # Confirms the subscription when the brand exists, otherwise rejects it.
  def subscribed
    if brand = find_brand
      # Subscribe to brand-specific compliance updates
      stream_from "brand_compliance_#{brand.id}"

      # Subscribe to session-specific updates if session_id provided
      if params[:session_id].present?
        stream_from "compliance_session_#{params[:session_id]}"
      end

      # Send initial connection confirmation
      transmit(
        event: "subscription_confirmed",
        brand_id: brand.id,
        session_id: params[:session_id]
      )
    else
      reject
    end
  end

  def unsubscribed
    # Cleanup any ongoing compliance checks for this session
    if params[:session_id].present?
      cancel_session_jobs(params[:session_id])
    end
  end

  # Client can request compliance check
  # data keys: "content", "content_type" (default "general"), "async"
  # (explicit false forces a synchronous in-process check), plus the option
  # keys consumed by build_check_options. Silently ignores unauthorized calls.
  def check_compliance(data)
    brand = find_brand
    return unless brand && authorized_to_check?(brand)

    content = data["content"]
    content_type = data["content_type"] || "general"
    options = build_check_options(data)

    # Validate input
    if content.blank?
      transmit_error("Content cannot be blank")
      return
    end

    # Start compliance check
    if data["async"] == false
      # Synchronous check for small content
      perform_sync_check(brand, content, content_type, options)
    else
      # Asynchronous check for larger content
      perform_async_check(brand, content, content_type, options)
    end
  end

  # Client can request specific aspect validation
  # Only the whitelisted aspects below are accepted; the result for the single
  # requested aspect is transmitted as an "aspect_validated" event.
  def validate_aspect(data)
    brand = find_brand
    return unless brand && authorized_to_check?(brand)

    aspect = data["aspect"]&.to_sym
    content = data["content"]

    unless %i[tone sentiment readability brand_voice colors typography].include?(aspect)
      transmit_error("Invalid aspect: #{aspect}")
      return
    end

    service = Branding::ComplianceServiceV2.new(brand, content, "general")
    result = service.check_specific_aspects([aspect])

    transmit(
      event: "aspect_validated",
      aspect: aspect,
      result: result[aspect]
    )
  rescue StandardError => e
    transmit_error("Validation failed: #{e.message}")
  end

  # Client can request fix preview
  # Looks up a violation cached for the current session (see
  # cache_session_results) and transmits a generated fix for it.
  def preview_fix(data)
    brand = find_brand
    return unless brand && authorized_to_check?(brand)

    violation_id = data["violation_id"]
    content = data["content"]

    # Find the violation in the current session
    violation = find_session_violation(violation_id)
    unless violation
      transmit_error("Violation not found")
      return
    end

    suggestion_engine = Branding::Compliance::SuggestionEngine.new(brand, [violation])
    fix = suggestion_engine.generate_fix(violation, content)

    transmit(
      event: "fix_preview",
      violation_id: violation_id,
      fix: fix
    )
  rescue StandardError => e
    transmit_error("Fix generation failed: #{e.message}")
  end

  # Client can get suggestions for specific violation
  # Accepts one or many violation IDs and transmits generated suggestions for
  # the matching session-cached violations.
  def get_suggestions(data)
    brand = find_brand
    return unless brand && authorized_to_check?(brand)

    violation_ids = Array(data["violation_ids"])
    violations = find_session_violations(violation_ids)

    suggestion_engine = Branding::Compliance::SuggestionEngine.new(brand, violations)
    suggestions = suggestion_engine.generate_suggestions

    transmit(
      event: "suggestions_generated",
      violation_ids: violation_ids,
      suggestions: suggestions
    )
  rescue StandardError => e
    transmit_error("Suggestion generation failed: #{e.message}")
  end

  private

  # Returns the Brand for params[:brand_id], or nil when absent/unknown.
  def find_brand
    Brand.find_by(id: params[:brand_id])
  end

  def authorized_to_check?(brand)
    # Check if current user has permission to check compliance for this brand
    return true if brand.user_id == current_user&.id

    # Check team permissions
    current_user&.has_brand_permission?(brand, :check_compliance)
  end

  # Assembles the options hash handed to the compliance service / job from the
  # client-supplied data plus channel params.
  def build_check_options(data)
    {
      session_id: params[:session_id],
      user_id: current_user&.id,
      broadcast_events: true,
      compliance_level: data["compliance_level"]&.to_sym || :standard,
      channel: data["channel"],
      audience: data["audience"],
      generate_suggestions: data["generate_suggestions"] != false,
      visual_data: data["visual_data"]
    }
  end

  # Runs the compliance check inline and transmits the sanitized results;
  # failures are reported to the client rather than raised.
  def perform_sync_check(brand, content, content_type, options)
    transmit(event: "check_started", mode: "sync")

    service = Branding::ComplianceServiceV2.new(brand, content, content_type, options)
    results = service.check_compliance

    # Store results in session cache
    cache_session_results(results)

    transmit(
      event: "check_complete",
      results: sanitize_results(results)
    )
  rescue StandardError => e
    transmit_error("Compliance check failed: #{e.message}")
  end

  # Enqueues a background job for the check and transmits its job_id so the
  # client can correlate the later broadcast events.
  def perform_async_check(brand, content, content_type, options)
    transmit(event: "check_started", mode: "async")

    job = BrandComplianceJob.perform_later(
      brand.id,
      content,
      content_type,
      options.merge(
        broadcast_events: true,
        session_id: params[:session_id]
      )
    )

    transmit(
      event: "job_queued",
      job_id: job.job_id
    )
  rescue StandardError => e
    transmit_error("Failed to queue compliance check: #{e.message}")
  end

  # Caches the latest results per session for one hour so preview_fix /
  # get_suggestions can look violations up again. No-op without a session_id.
  def cache_session_results(results)
    return unless params[:session_id]

    Rails.cache.write(
      "compliance_session:#{params[:session_id]}:results",
      results,
      expires_in: 1.hour
    )
  end

  # Finds one cached violation by id for the current session, or nil.
  def find_session_violation(violation_id)
    return unless params[:session_id]

    results = Rails.cache.read("compliance_session:#{params[:session_id]}:results")
    results&.dig(:violations)&.find { |v| v[:id] == violation_id }
  end

  # Finds all cached violations matching the given ids; [] when no session or
  # nothing cached.
  def find_session_violations(violation_ids)
    return [] unless params[:session_id]

    results = Rails.cache.read("compliance_session:#{params[:session_id]}:results")
    violations = results&.dig(:violations) || []
    violations.select { |v| violation_ids.include?(v[:id]) }
  end

  def cancel_session_jobs(session_id)
    # Implementation would depend on job tracking system
    # This is a placeholder for canceling any ongoing jobs
  end

  # Transmits a standard error event with an ISO8601 timestamp.
  def transmit_error(message)
    transmit(
      event: "error",
      message: message,
      timestamp: Time.current.iso8601
    )
  end

  # Remove any sensitive or unnecessary data before transmitting: keeps only
  # the whitelisted keys and converts AR records to ids and times to ISO8601.
  def sanitize_results(results)
    results.slice(
      :compliant,
      :score,
      :summary,
      :violations,
      :suggestions,
      :metadata
    ).deep_transform_values do |value|
      case value
      when ActiveRecord::Base
        value.id
      when Time, DateTime
        value.iso8601
      else
        value
      end
    end
  end
end
# Lists the signed-in user's activity log with optional date-range and status
# filters, plus headline counters for the dashboard.
class ActivitiesController < ApplicationController
  def index
    scope = current_user.activities
      .includes(:user)
      .recent
      .page(params[:page])
      .per(25)

    # Optional date-range filtering (string params are bound via placeholders)
    scope = scope.where("occurred_at >= ?", params[:start_date]) if params[:start_date].present?
    scope = scope.where("occurred_at <= ?", params[:end_date]) if params[:end_date].present?

    # Optional status filtering; unknown values leave the scope untouched
    scope =
      case params[:status]
      when "suspicious" then scope.suspicious
      when "failed" then scope.failed_requests
      when "successful" then scope.successful_requests
      else scope
      end

    @activities = scope

    # Unfiltered activity statistics for the stats panel
    all_activities = current_user.activities
    @stats = {
      total: all_activities.count,
      today: all_activities.today.count,
      this_week: all_activities.this_week.count,
      suspicious: all_activities.suspicious.count,
      failed_requests: all_activities.failed_requests.count
    }
  end
end
# Generates per-user activity reports (HTML/JSON/PDF) and CSV exports for a
# date range supplied via start_date/end_date params (default: last 30 days).
class ActivityReportsController < ApplicationController
  before_action :require_authentication

  # GET: builds @report via ActivityReportService for the requested range.
  def show
    @start_date = parse_date_param(params[:start_date], 30.days.ago)
    @end_date = parse_date_param(params[:end_date], Date.current)

    @report = ActivityReportService.new(
      current_user,
      start_date: @start_date,
      end_date: @end_date
    ).generate_report

    respond_to do |format|
      format.html
      format.json { render json: @report }
      # PDF rendering is only offered when the Prawn gem is loaded
      format.pdf { render_pdf } if defined?(Prawn)
    end
  end

  # GET: streams the user's activities in the range as a CSV download.
  def export
    @start_date = parse_date_param(params[:start_date], 30.days.ago)
    @end_date = parse_date_param(params[:end_date], Date.current)

    activities = current_user.activities
      .where(occurred_at: @start_date.beginning_of_day..@end_date.end_of_day)
      .order(:occurred_at)

    respond_to do |format|
      format.csv { send_data generate_csv(activities), filename: "activity_report_#{Date.current}.csv" }
    end
  end

  private

  # Safely parses a user-supplied date param, returning +fallback+ when the
  # value is blank or malformed. Previously Date.parse was called directly on
  # raw params, so ?start_date=garbage raised ArgumentError and produced a 500.
  def parse_date_param(value, fallback)
    return fallback if value.blank?

    Date.parse(value)
  rescue ArgumentError, TypeError
    fallback
  end

  # Builds the CSV export, streaming the relation in batches via find_each to
  # keep memory bounded on large ranges.
  def generate_csv(activities)
    require 'csv'

    CSV.generate(headers: true) do |csv|
      csv << [
        'Date/Time',
        'Action',
        'Path',
        'Method',
        'Status',
        'Response Time (ms)',
        'IP Address',
        'Device',
        'Browser',
        'OS',
        'Suspicious',
        'Reasons'
      ]

      activities.find_each do |activity|
        csv << [
          activity.occurred_at.strftime('%Y-%m-%d %H:%M:%S'),
          activity.full_action,
          activity.request_path,
          activity.request_method,
          activity.response_status,
          activity.duration_in_ms,
          activity.ip_address,
          activity.device_type,
          activity.browser_name,
          activity.os_name,
          activity.suspicious? ? 'Yes' : 'No',
          activity.metadata['suspicious_reasons']&.join(', ')
        ]
      end
    end
  end

  def render_pdf
    # This would require the Prawn gem
    # Implementation depends on specific PDF requirements
    render plain: "PDF export not implemented", status: :not_implemented
  end
end
# Admin-only dashboard: recent users, activities and audit-log entries.
# Access is gated by #ensure_admin on every action.
class AdminController < ApplicationController
  before_action :ensure_admin

  # Dashboard overview with small fixed-size samples of each collection.
  def index
    @users = User.all.limit(20)
    @recent_activities = Activity.includes(:user).order(occurred_at: :desc).limit(10)
    @admin_audit_logs = AdminAuditLog.includes(:user).order(created_at: :desc).limit(10)
  end

  # Fix: previously `User.all` with no pagination loaded the entire users
  # table; paginate like the sibling #activities and #audit_logs actions.
  def users
    @users = User.page(params[:page]).per(50)
  end

  def activities
    @activities = Activity.includes(:user).order(occurred_at: :desc).page(params[:page]).per(50)
  end

  def audit_logs
    @audit_logs = AdminAuditLog.includes(:user).order(created_at: :desc).page(params[:page]).per(50)
  end

  private

  # Redirects non-admin users to the root path with an alert.
  def ensure_admin
    unless current_user&.admin?
      redirect_to root_path, alert: "Access denied. Admin privileges required."
    end
  end
end
# JSON analytics API for the current user's journeys, campaigns, funnels,
# A/B tests and personas. All `days` params are clamped to sane ranges; all
# data access is scoped through current_user associations.
class Api::V1::AnalyticsController < Api::V1::BaseController

  # GET /api/v1/analytics/overview
  # Aggregated dashboard numbers; `days` clamped to 7..365.
  def overview
    days = [params[:days].to_i, 7].max
    days = [days, 365].min # Cap at 1 year

    overview_data = {
      summary: calculate_user_overview(days),
      journeys: calculate_journey_overview(days),
      campaigns: calculate_campaign_overview(days),
      performance: calculate_performance_overview(days)
    }

    render_success(data: overview_data)
  end

  # GET /api/v1/analytics/journeys/:id
  # Detailed analytics for one journey; `days` clamped to 30..365.
  def journey_analytics
    journey = current_user.journeys.find(params[:id])
    days = [params[:days].to_i, 30].max
    days = [days, 365].min

    analytics_data = {
      summary: journey.analytics_summary(days),
      performance_score: journey.latest_performance_score,
      funnel_performance: journey.funnel_performance('default', days),
      trends: journey.performance_trends(7),
      ab_test_status: journey.ab_test_status,
      step_analytics: calculate_step_analytics(journey, days),
      conversion_metrics: calculate_journey_conversions(journey, days),
      engagement_metrics: calculate_journey_engagement(journey, days)
    }

    render_success(data: analytics_data)
  end

  # GET /api/v1/analytics/campaigns/:id
  def campaign_analytics
    campaign = current_user.campaigns.find(params[:id])
    days = [params[:days].to_i, 30].max
    days = [days, 365].min

    analytics_service = CampaignAnalyticsService.new(campaign)
    analytics_data = analytics_service.generate_report(days)

    render_success(data: analytics_data)
  end

  # GET /api/v1/analytics/funnels/:journey_id
  # Funnel breakdown for a journey; `days` clamped to 7..90.
  def funnel_analytics
    journey = current_user.journeys.find(params[:journey_id])
    funnel_name = params[:funnel_name] || 'default'
    days = [params[:days].to_i, 7].max
    days = [days, 90].min

    start_date = days.days.ago
    end_date = Time.current

    funnel_data = {
      overview: ConversionFunnel.funnel_overview(journey.id, funnel_name, start_date, end_date),
      steps: ConversionFunnel.funnel_step_breakdown(journey.id, funnel_name, start_date, end_date),
      trends: ConversionFunnel.funnel_trends(journey.id, funnel_name, start_date, end_date),
      drop_off_analysis: calculate_drop_off_analysis(journey, funnel_name, start_date, end_date)
    }

    render_success(data: funnel_data)
  end

  # GET /api/v1/analytics/ab_tests/:id
  # `days` has a floor of the test's own duration (no upper cap).
  def ab_test_analytics
    ab_test = current_user.ab_tests.find(params[:id])
    days = [params[:days].to_i, ab_test.duration_days].max

    ab_analytics_service = AbTestAnalyticsService.new(ab_test)
    analytics_data = ab_analytics_service.generate_report(days)

    render_success(data: analytics_data)
  end

  # GET /api/v1/analytics/comparative
  # Side-by-side comparison of 1-5 journeys passed as a comma-separated list.
  def comparative_analytics
    journey_ids = params[:journey_ids].to_s.split(',').map(&:to_i)

    if journey_ids.empty? || journey_ids.count > 5
      return render_error(message: 'Please provide 1-5 journey IDs for comparison')
    end

    journeys = current_user.journeys.where(id: journey_ids)

    # Reject the request if any requested id is missing or not owned
    unless journeys.count == journey_ids.count
      return render_error(message: 'One or more journeys not found')
    end

    days = [params[:days].to_i, 30].max
    days = [days, 90].min

    comparison_service = JourneyComparisonService.new(journeys)
    comparison_data = comparison_service.generate_comparison(days)

    render_success(data: comparison_data)
  end

  # GET /api/v1/analytics/trends
  # Daily averages of one whitelisted metric over the period.
  def trends
    days = [params[:days].to_i, 30].max
    days = [days, 365].min
    metric = params[:metric] || 'conversion_rate'

    unless %w[conversion_rate engagement_score completion_rate execution_count].include?(metric)
      return render_error(message: 'Invalid metric specified')
    end

    trends_data = calculate_user_trends(metric, days)

    render_success(data: trends_data)
  end

  # GET /api/v1/analytics/personas/:id/performance
  def persona_performance
    persona = current_user.personas.find(params[:id])
    days = [params[:days].to_i, 30].max
    days = [days, 365].min

    # Get campaigns and journeys associated with this persona
    campaigns = persona.campaigns.includes(:journeys)
    journeys = campaigns.flat_map(&:journeys)

    performance_data = {
      summary: calculate_persona_summary(persona, journeys, days),
      campaign_performance: calculate_persona_campaign_performance(campaigns, days),
      journey_performance: calculate_persona_journey_performance(journeys, days),
      engagement_patterns: calculate_persona_engagement_patterns(persona, days),
      conversion_insights: calculate_persona_conversion_insights(persona, days)
    }

    render_success(data: performance_data)
  end

  # POST /api/v1/analytics/custom_report
  def custom_report
    report_params = params.permit(
      :name, :description, :date_range_days,
      metrics: [], filters: {}, grouping: []
    )

    begin
      # Generate custom analytics report based on parameters
      report_data = generate_custom_report(report_params)

      render_success(
        data: report_data,
        message: 'Custom report generated successfully'
      )
    rescue => e
      render_error(message: "Failed to generate report: #{e.message}")
    end
  end

  # GET /api/v1/analytics/real_time
  def real_time
    # Get real-time metrics for the last 24 hours
    real_time_data = {
      active_journeys: calculate_active_journeys,
      recent_executions: calculate_recent_executions,
      live_conversions: calculate_live_conversions,
      engagement_activity: calculate_engagement_activity,
      system_health: calculate_system_health
    }

    render_success(data: real_time_data)
  end

  private

  # Headline counts for the user's journeys/campaigns/personas.
  def calculate_user_overview(days)
    journeys = current_user.journeys
    start_date = days.days.ago

    {
      total_journeys: journeys.count,
      active_journeys: journeys.where(status: %w[draft published]).count,
      total_executions: current_user.journey_executions.where(created_at: start_date..).count,
      total_campaigns: current_user.campaigns.count,
      total_personas: current_user.personas.count,
      period_days: days
    }
  end

  # Averages and totals over JourneyAnalytics rows in the period.
  def calculate_journey_overview(days)
    journeys = current_user.journeys.includes(:journey_analytics)
    start_date = days.days.ago

    analytics = JourneyAnalytics.joins(:journey)
      .where(journeys: { user: current_user })
      .where(period_start: start_date..)

    {
      average_conversion_rate: analytics.average(:conversion_rate)&.round(2) || 0,
      average_engagement_score: analytics.average(:engagement_score)&.round(2) || 0,
      total_executions: analytics.sum(:total_executions),
      completed_executions: analytics.sum(:completed_executions),
      top_performing: find_top_performing_journeys(5)
    }
  end

  def calculate_campaign_overview(days)
    campaigns = current_user.campaigns.includes(:journeys)

    {
      active_campaigns: campaigns.where(status: 'active').count,
      total_journey_count: campaigns.joins(:journeys).count,
      campaign_performance: campaigns.limit(5).map do |campaign|
        {
          id: campaign.id,
          name: campaign.name,
          journey_count: campaign.journeys.count,
          status: campaign.status
        }
      end
    }
  end

  def calculate_performance_overview(days)
    start_date = days.days.ago

    # Get performance metrics across all user's journeys
    user_journey_ids = current_user.journeys.pluck(:id)

    metrics = JourneyMetric.where(journey_id: user_journey_ids)
      .for_date_range(start_date, Time.current)

    {
      average_performance_score: calculate_average_performance_score(metrics),
      trend_direction: calculate_trend_direction(metrics),
      key_insights: generate_key_insights(metrics)
    }
  end

  # Per-step execution counts, completion rates and average durations.
  def calculate_step_analytics(journey, days)
    journey.journey_steps.includes(:step_executions).map do |step|
      executions = step.step_executions.where(created_at: days.days.ago..)

      {
        step_id: step.id,
        step_name: step.name,
        step_type: step.content_type,
        execution_count: executions.count,
        completion_rate: calculate_step_completion_rate(executions),
        average_duration: calculate_average_duration(executions)
      }
    end
  end

  def calculate_journey_conversions(journey, days)
    # Placeholder for detailed conversion calculations
    {
      total_conversions: 0,
      conversion_rate: 0.0,
      conversion_value: 0.0,
      conversion_by_source: {},
      conversion_trends: []
    }
  end

  def calculate_journey_engagement(journey, days)
    # Placeholder for engagement calculations
    {
      engagement_score: 0.0,
      interaction_count: 0,
      average_session_duration: 0.0,
      bounce_rate: 0.0,
      engagement_by_step: []
    }
  end

  # Computes the drop-off rate between each consecutive pair of funnel steps.
  def calculate_drop_off_analysis(journey, funnel_name, start_date, end_date)
    steps = journey.journey_steps.order(:position)
    drop_off_data = []

    steps.each_with_index do |step, index|
      next_step = steps[index + 1]
      next unless next_step

      # Calculate drop-off rate between this step and the next
      current_executions = step.step_executions.where(created_at: start_date..end_date).count
      next_executions = next_step.step_executions.where(created_at: start_date..end_date).count

      drop_off_rate = current_executions > 0 ? ((current_executions - next_executions).to_f / current_executions * 100).round(2) : 0

      drop_off_data << {
        from_step: step.name,
        to_step: next_step.name,
        drop_off_rate: drop_off_rate,
        users_lost: current_executions - next_executions
      }
    end

    drop_off_data
  end

  # Top `limit` journeys by average conversion rate across their analytics.
  def find_top_performing_journeys(limit)
    current_user.journeys
      .joins(:journey_analytics)
      .group('journeys.id, journeys.name')
      .order('AVG(journey_analytics.conversion_rate) DESC')
      .limit(limit)
      .pluck('journeys.id, journeys.name, AVG(journey_analytics.conversion_rate)')
      .map { |id, name, rate| { id: id, name: name, conversion_rate: rate.round(2) } }
  end

  # Weighted blend of conversion (0.4), engagement (0.3) and completion (0.3).
  def calculate_average_performance_score(metrics)
    return 0.0 if metrics.empty?

    # Calculate weighted performance score across all metrics
    total_score = metrics.sum do |metric|
      conversion_weight = 0.4
      engagement_weight = 0.3
      completion_weight = 0.3

      (metric.conversion_rate * conversion_weight +
       metric.engagement_score * engagement_weight +
       metric.completion_rate * completion_weight)
    end

    (total_score / metrics.count).round(1)
  end

  # Classifies the recent conversion-rate trend as improving/declining/stable
  # based on the relative change across the last 7 periods.
  def calculate_trend_direction(metrics)
    return 'stable' if metrics.count < 2

    recent_scores = metrics.order(:period_start).last(7).map(&:conversion_rate)
    return 'stable' if recent_scores.count < 2

    baseline = recent_scores.first.to_f
    # Fix: a zero baseline previously made the division raise
    # ZeroDivisionError (integer/BigDecimal rates) or produce NaN (floats);
    # treat it as no measurable trend instead.
    return 'stable' if baseline.zero?

    trend = (recent_scores.last.to_f - baseline) / baseline

    if trend > 0.05
      'improving'
    elsif trend < -0.05
      'declining'
    else
      'stable'
    end
  end

  # Human-readable observations derived from the metrics relation.
  def generate_key_insights(metrics)
    insights = []

    # Add performance insights based on metrics analysis
    if metrics.any?
      avg_conversion = metrics.average(:conversion_rate)

      if avg_conversion > 10
        insights << "Strong conversion performance across journeys"
      elsif avg_conversion < 2
        insights << "Conversion rates could be improved"
      end

      high_engagement = metrics.where('engagement_score > ?', 75).count
      if high_engagement > metrics.count * 0.7
        insights << "High engagement levels maintained"
      end
    end

    insights
  end

  # Daily averages of `metric` for all of the user's journeys over `days`.
  def calculate_user_trends(metric, days)
    user_journey_ids = current_user.journeys.pluck(:id)

    analytics = JourneyAnalytics.where(journey_id: user_journey_ids)
      .where(period_start: days.days.ago..)
      .order(:period_start)

    trends = analytics.group("DATE(period_start)").average(metric)

    {
      metric: metric,
      period_days: days,
      data_points: trends.map { |date, value| { date: date, value: value&.round(2) || 0 } }
    }
  end

  def calculate_persona_summary(persona, journeys, days)
    {
      persona_name: persona.name,
      total_journeys: journeys.count,
      total_campaigns: persona.campaigns.count,
      performance_score: calculate_persona_performance_score(journeys, days)
    }
  end

  def calculate_persona_campaign_performance(campaigns, days)
    campaigns.map do |campaign|
      {
        id: campaign.id,
        name: campaign.name,
        status: campaign.status,
        journey_count: campaign.journeys.count
      }
    end
  end

  def calculate_persona_journey_performance(journeys, days)
    journeys.map do |journey|
      {
        id: journey.id,
        name: journey.name,
        performance_score: journey.latest_performance_score,
        conversion_rate: journey.current_analytics&.conversion_rate || 0
      }
    end
  end

  def calculate_persona_engagement_patterns(persona, days)
    # Placeholder for persona engagement analysis
    {
      preferred_channels: [],
      engagement_times: [],
      content_preferences: []
    }
  end

  def calculate_persona_conversion_insights(persona, days)
    # Placeholder for persona conversion analysis
    {
      conversion_triggers: [],
      optimal_journey_length: 0,
      successful_touchpoints: []
    }
  end

  # Mean of available latest_performance_score values, 0.0 when none exist.
  def calculate_persona_performance_score(journeys, days)
    return 0.0 if journeys.empty?

    scores = journeys.map(&:latest_performance_score).compact
    return 0.0 if scores.empty?

    (scores.sum.to_f / scores.count).round(1)
  end

  def generate_custom_report(report_params)
    # Placeholder for custom report generation
    {
      report_name: report_params[:name],
      generated_at: Time.current,
      data: {
        summary: "Custom report functionality would be implemented here",
        metrics: report_params[:metrics] || [],
        filters_applied: report_params[:filters] || {}
      }
    }
  end

  def calculate_active_journeys
    current_user.journeys.where(status: %w[draft published]).count
  end

  def calculate_recent_executions
    current_user.journey_executions.where(created_at: 24.hours.ago..).count
  end

  def calculate_live_conversions
    # Placeholder for real-time conversion tracking
    0
  end

  def calculate_engagement_activity
    # Placeholder for real-time engagement tracking
    {
      active_sessions: 0,
      recent_interactions: 0
    }
  end

  def calculate_system_health
    {
      status: 'healthy',
      response_time: 'normal',
      uptime: '99.9%'
    }
  end

  # Percentage of executions in `executions` that are completed.
  def calculate_step_completion_rate(executions)
    return 0.0 if executions.empty?

    completed_count = executions.completed.count
    total_count = executions.count

    return 0.0 if total_count == 0
    (completed_count.to_f / total_count * 100).round(2)
  end

  # Mean duration (in hours) of completed executions with both timestamps set.
  def calculate_average_duration(executions)
    completed_executions = executions.completed.where.not(completed_at: nil, started_at: nil)
    return 0.0 if completed_executions.empty?

    durations = completed_executions.map do |execution|
      (execution.completed_at - execution.started_at) / 1.hour # Convert to hours
    end

    (durations.sum / durations.count).round(2)
  end
end
# Shared base for all v1 API controllers: JSON-by-default responses, a
# uniform success/error envelope, and ownership checks for user resources.
class Api::V1::BaseController < ApplicationController
  # Skip CSRF protection for API endpoints
  skip_before_action :verify_authenticity_token

  # Use JSON format by default
  before_action :set_default_format

  # Include API-specific concerns
  include ApiAuthentication
  include ApiErrorHandling
  include ApiPagination

  private

  # Force the JSON responder unless the client asked for a specific format.
  def set_default_format
    request.format = :json unless params[:format]
  end

  # Renders the standard success envelope. Optional pieces (data, message,
  # meta) are only included when present, keeping payloads minimal.
  def render_success(data: nil, message: nil, status: :ok, meta: {})
    payload = { success: true }.tap do |body|
      body[:data] = data if data
      body[:message] = message if message
      body[:meta] = meta if meta.any?
    end

    render json: payload, status: status
  end

  # Renders the standard error envelope with an optional machine-readable
  # code and a per-field errors hash.
  def render_error(message: nil, errors: {}, status: :unprocessable_entity, code: nil)
    payload = {
      success: false,
      message: message || 'An error occurred'
    }.tap do |body|
      body[:code] = code if code
      body[:errors] = errors if errors.any?
    end

    render json: payload, status: status
  end

  # Returns true when +resource+ belongs to the current user; otherwise
  # renders a 404 (deliberately not 403, to avoid leaking existence) and
  # returns false so the caller can bail out.
  def ensure_user_resource_access(resource)
    return true if resource&.user == current_user

    render_error(message: 'Resource not found', status: :not_found)
    false
  end
end
module Api
-
module V1
-
class BrandComplianceController < ApplicationController
-
before_action :authenticate_user!
-
before_action :set_brand
-
before_action :authorize_brand_access
-
-
# POST /api/v1/brands/:brand_id/compliance/check
-
def check
-
content = compliance_params[:content]
-
content_type = compliance_params[:content_type] || "general"
-
-
if content.blank?
-
render json: { error: "Content is required" }, status: :unprocessable_entity
-
return
-
end
-
-
options = build_compliance_options
-
-
# Use async processing for large content
-
if content.length > 10_000 && params[:sync] != "true"
-
job = BrandComplianceJob.perform_later(
-
@brand.id,
-
content,
-
content_type,
-
options.merge(
-
user_id: current_user.id,
-
notify: params[:notify] == "true",
-
store_results: true
-
)
-
)
-
-
render json: {
-
status: "processing",
-
job_id: job.job_id,
-
message: "Compliance check queued for processing"
-
}, status: :accepted
-
else
-
service = Branding::ComplianceServiceV2.new(@brand, content, content_type, options)
-
results = service.check_compliance
-
-
# Store results if requested
-
store_results(results) if params[:store_results] == "true"
-
-
render json: format_compliance_results(results)
-
end
-
rescue StandardError => e
-
render json: { error: e.message }, status: :internal_server_error
-
end
-
-
# POST /api/v1/brands/:brand_id/compliance/validate_aspect
-
def validate_aspect
-
aspect = params[:aspect]&.to_sym
-
content = compliance_params[:content]
-
-
unless %i[tone sentiment readability brand_voice colors typography logo composition].include?(aspect)
-
render json: { error: "Invalid aspect: #{aspect}" }, status: :unprocessable_entity
-
return
-
end
-
-
service = Branding::ComplianceServiceV2.new(@brand, content, "general", build_compliance_options)
-
results = service.check_specific_aspects([aspect])
-
-
render json: {
-
aspect: aspect,
-
results: results[aspect],
-
timestamp: Time.current
-
}
-
rescue StandardError => e
-
render json: { error: e.message }, status: :internal_server_error
-
end
-
-
# POST /api/v1/brands/:brand_id/compliance/preview_fix
-
def preview_fix
-
violation = params[:violation]
-
content = compliance_params[:content]
-
-
unless violation.present?
-
render json: { error: "Violation data is required" }, status: :unprocessable_entity
-
return
-
end
-
-
suggestion_engine = Branding::Compliance::SuggestionEngine.new(@brand, [violation])
-
fix = suggestion_engine.generate_fix(violation, content)
-
-
render json: {
-
violation_id: violation[:id],
-
fix: fix,
-
alternatives: suggestion_engine.suggest_alternatives(
-
content[0..100],
-
{ content_type: params[:content_type], audience: params[:audience] }
-
)
-
}
-
rescue StandardError => e
-
render json: { error: e.message }, status: :internal_server_error
-
end
-
-
# GET /api/v1/brands/:brand_id/compliance/history
-
def history
-
results = @brand.compliance_results
-
.by_content_type(params[:content_type])
-
.recent
-
.page(params[:page])
-
.per(params[:per_page] || 20)
-
-
render json: {
-
results: results.map { |r| format_history_result(r) },
-
pagination: {
-
current_page: results.current_page,
-
total_pages: results.total_pages,
-
total_count: results.total_count
-
},
-
statistics: {
-
average_score: results.average_score,
-
compliance_rate: results.compliance_rate,
-
common_violations: @brand.compliance_results.common_violations(5)
-
}
-
}
-
end
-
-
# POST /api/v1/brands/:brand_id/compliance/validate_and_fix
-
def validate_and_fix
-
content = compliance_params[:content]
-
content_type = compliance_params[:content_type] || "general"
-
-
service = Branding::ComplianceServiceV2.new(@brand, content, content_type, build_compliance_options)
-
results = service.validate_and_fix
-
-
render json: {
-
original_compliant: results[:original_results][:compliant],
-
original_score: results[:original_results][:score],
-
fixes_applied: results[:fixes_applied],
-
final_compliant: results[:final_results][:compliant],
-
final_score: results[:final_results][:score],
-
fixed_content: results[:fixed_content]
-
}
-
rescue StandardError => e
-
render json: { error: e.message }, status: :internal_server_error
-
end
-
-
private
-
-
def set_brand
-
@brand = Brand.find(params[:brand_id])
-
rescue ActiveRecord::RecordNotFound
-
render json: { error: "Brand not found" }, status: :not_found
-
end
-
-
def authorize_brand_access
-
unless @brand.user_id == current_user.id || current_user.has_brand_permission?(@brand, :check_compliance)
-
render json: { error: "Unauthorized" }, status: :forbidden
-
end
-
end
-
-
def compliance_params
-
params.permit(:content, :content_type, :visual_data => {})
-
end
-
-
def build_compliance_options
-
{
-
compliance_level: (params[:compliance_level] || "standard").to_sym,
-
generate_suggestions: params[:suggestions] != "false",
-
channel: params[:channel],
-
audience: params[:audience],
-
cache_results: params[:cache] != "false",
-
visual_data: params[:visual_data]
-
}
-
end
-
-
def store_results(results)
-
ComplianceResult.create!(
-
brand: @brand,
-
content_type: params[:content_type] || "general",
-
content_hash: Digest::SHA256.hexdigest(compliance_params[:content]),
-
compliant: results[:compliant],
-
score: results[:score],
-
violations_count: results[:violations]&.count || 0,
-
violations_data: results[:violations] || [],
-
suggestions_data: results[:suggestions] || [],
-
analysis_data: results[:analysis] || {},
-
metadata: results[:metadata] || {}
-
)
-
rescue StandardError => e
-
Rails.logger.error "Failed to store compliance results: #{e.message}"
-
end
-
-
def format_compliance_results(results)
-
{
-
compliant: results[:compliant],
-
score: results[:score],
-
summary: results[:summary],
-
violations: format_violations(results[:violations]),
-
suggestions: format_suggestions(results[:suggestions]),
-
metadata: {
-
processing_time: results[:metadata][:processing_time],
-
validators_used: results[:metadata][:validators_used],
-
compliance_level: results[:metadata][:compliance_level],
-
timestamp: Time.current
-
}
-
}
-
end
-
-
# Normalizes raw violation hashes into the client-facing payload shape,
# renaming validator_type -> validator. Returns [] when violations is nil.
def format_violations(violations)
  return [] unless violations

  violations.map do |raw|
    {
      id: raw[:id],
      type: raw[:type],
      severity: raw[:severity],
      message: raw[:message],
      validator: raw[:validator_type],
      position: raw[:position],
      details: raw[:details]
    }
  end
end
-
-
# Normalizes raw suggestion hashes for API output, renaming
# specific_actions -> actions and effort_level -> effort.
# Returns [] when suggestions is nil.
def format_suggestions(suggestions)
  return [] unless suggestions

  suggestions.map do |raw|
    {
      type: raw[:type],
      priority: raw[:priority],
      title: raw[:title],
      description: raw[:description],
      actions: raw[:specific_actions],
      effort: raw[:effort_level],
      estimated_time: raw[:estimated_time]
    }
  end
end
-
-
# Compact representation of a persisted ComplianceResult for history
# listings; counts high-severity violations on the fly.
def format_history_result(result)
  summary = {
    id: result.id,
    content_type: result.content_type,
    compliant: result.compliant,
    score: result.score,
    violations_count: result.violations_count
  }

  summary[:high_severity_count] = result.high_severity_violations.count
  summary[:created_at] = result.created_at
  summary[:processing_time] = result.processing_time_seconds
  summary
end
-
end
-
end
-
end
-
class Api::V1::CampaignsController < Api::V1::BaseController
-
before_action :set_campaign, only: [:show, :update, :destroy, :activate, :pause, :analytics]
-
-
# GET /api/v1/campaigns
-
def index
-
campaigns = current_user.campaigns.includes(:persona, :journeys)
-
-
# Apply filters
-
campaigns = campaigns.where(status: params[:status]) if params[:status].present?
-
campaigns = campaigns.where(campaign_type: params[:campaign_type]) if params[:campaign_type].present?
-
campaigns = campaigns.where(industry: params[:industry]) if params[:industry].present?
-
campaigns = campaigns.where(persona_id: params[:persona_id]) if params[:persona_id].present?
-
-
# Apply search
-
if params[:search].present?
-
campaigns = campaigns.where(
-
'name ILIKE ? OR description ILIKE ?',
-
"%#{params[:search]}%", "%#{params[:search]}%"
-
)
-
end
-
-
# Apply sorting
-
case params[:sort_by]
-
when 'name'
-
campaigns = campaigns.order(:name)
-
when 'status'
-
campaigns = campaigns.order(:status, :name)
-
when 'created_at'
-
campaigns = campaigns.order(:created_at)
-
when 'updated_at'
-
campaigns = campaigns.order(:updated_at)
-
else
-
campaigns = campaigns.order(updated_at: :desc)
-
end
-
-
paginate_and_render(campaigns, serializer: method(:serialize_campaign_summary))
-
end
-
-
# GET /api/v1/campaigns/:id
-
def show
-
render_success(data: serialize_campaign_detail(@campaign))
-
end
-
-
# POST /api/v1/campaigns
-
def create
-
campaign = current_user.campaigns.build(campaign_params)
-
-
if campaign.save
-
render_success(
-
data: serialize_campaign_detail(campaign),
-
message: 'Campaign created successfully',
-
status: :created
-
)
-
else
-
render_error(
-
message: 'Failed to create campaign',
-
errors: campaign.errors.as_json
-
)
-
end
-
end
-
-
# PUT /api/v1/campaigns/:id
-
def update
-
if @campaign.update(campaign_params)
-
render_success(
-
data: serialize_campaign_detail(@campaign),
-
message: 'Campaign updated successfully'
-
)
-
else
-
render_error(
-
message: 'Failed to update campaign',
-
errors: @campaign.errors.as_json
-
)
-
end
-
end
-
-
# DELETE /api/v1/campaigns/:id
-
def destroy
-
@campaign.destroy!
-
render_success(message: 'Campaign deleted successfully')
-
end
-
-
# POST /api/v1/campaigns/:id/activate
-
def activate
-
if @campaign.activate!
-
render_success(
-
data: serialize_campaign_detail(@campaign),
-
message: 'Campaign activated successfully'
-
)
-
else
-
render_error(
-
message: 'Failed to activate campaign',
-
errors: @campaign.errors.as_json
-
)
-
end
-
end
-
-
# POST /api/v1/campaigns/:id/pause
-
def pause
-
if @campaign.pause!
-
render_success(
-
data: serialize_campaign_detail(@campaign),
-
message: 'Campaign paused successfully'
-
)
-
else
-
render_error(
-
message: 'Failed to pause campaign',
-
errors: @campaign.errors.as_json
-
)
-
end
-
end
-
-
# GET /api/v1/campaigns/:id/analytics
-
def analytics
  # Window defaults to 30 days and is clamped to 1..365. The previous
  # [days, 30].max silently forced every request below 30 days up to 30,
  # which was inconsistent with the step-analytics endpoint (floor of 1).
  days = params[:days].present? ? params[:days].to_i.clamp(1, 365) : 30

  analytics_service = CampaignAnalyticsService.new(@campaign)
  analytics_data = analytics_service.generate_report(days)

  render_success(data: analytics_data)
end
-
-
# GET /api/v1/campaigns/:id/journeys
-
def journeys
-
journeys = @campaign.journeys.includes(:journey_steps, :journey_analytics)
-
-
# Apply filters
-
journeys = journeys.where(status: params[:status]) if params[:status].present?
-
-
# Apply sorting
-
case params[:sort_by]
-
when 'name'
-
journeys = journeys.order(:name)
-
when 'performance'
-
# Sort by latest performance score
-
journeys = journeys.joins(:journey_analytics)
-
.group('journeys.id')
-
.order('AVG(journey_analytics.conversion_rate) DESC')
-
else
-
journeys = journeys.order(created_at: :desc)
-
end
-
-
paginate_and_render(journeys, serializer: method(:serialize_journey_for_campaign))
-
end
-
-
# POST /api/v1/campaigns/:id/journeys
-
# POST /api/v1/campaigns/:id/journeys
# Attaches a journey to the campaign, in one of two modes:
#   * params[:journey][:id] present — re-parents one of the current
#     user's existing journeys onto this campaign (raises RecordNotFound
#     for journeys the user does not own).
#   * otherwise — creates a new journey (name/description) owned by the
#     current user under this campaign.
# update!/save! raise on validation failure; there is no rescue here, so
# errors surface through the framework's exception handling.
def add_journey
  journey_params = params.require(:journey).permit(:id, :name, :description)

  if journey_params[:id].present?
    # Associate existing journey
    journey = current_user.journeys.find(journey_params[:id])
    journey.update!(campaign: @campaign)
  else
    # Create new journey for campaign
    journey = @campaign.journeys.build(
      journey_params.merge(user: current_user)
    )
    journey.save!
  end

  render_success(
    data: serialize_journey_for_campaign(journey),
    message: 'Journey added to campaign successfully',
    status: :created
  )
end
-
-
# DELETE /api/v1/campaigns/:id/journeys/:journey_id
-
def remove_journey
-
journey = @campaign.journeys.find(params[:journey_id])
-
journey.update!(campaign: nil)
-
-
render_success(message: 'Journey removed from campaign successfully')
-
end
-
-
# GET /api/v1/campaigns/industries
-
def industries
-
industries = Campaign.where(user: current_user).distinct.pluck(:industry).compact.sort
-
render_success(data: industries)
-
end
-
-
# GET /api/v1/campaigns/types
-
def types
-
types = Campaign::CAMPAIGN_TYPES
-
render_success(data: types)
-
end
-
-
private
-
-
def set_campaign
-
@campaign = current_user.campaigns.find(params[:id])
-
end
-
-
def campaign_params
-
params.require(:campaign).permit(
-
:name, :description, :campaign_type, :industry, :status,
-
:start_date, :end_date, :budget, :persona_id,
-
goals: [], target_metrics: {}, settings: {}
-
)
-
end
-
-
# Compact campaign representation for list endpoints. Includes the
# associated persona's name (nil-safe) and a journey count, but omits the
# heavier goals/metrics/settings payloads of the detail serializer.
def serialize_campaign_summary(campaign)
  persona = campaign.persona

  {
    id: campaign.id,
    name: campaign.name,
    description: campaign.description,
    campaign_type: campaign.campaign_type,
    industry: campaign.industry,
    status: campaign.status,
    persona_id: campaign.persona_id,
    persona_name: persona && persona.name,
    journey_count: campaign.journeys.count,
    start_date: campaign.start_date,
    end_date: campaign.end_date,
    budget: campaign.budget,
    created_at: campaign.created_at,
    updated_at: campaign.updated_at
  }
end
-
-
def serialize_campaign_detail(campaign)
-
{
-
id: campaign.id,
-
name: campaign.name,
-
description: campaign.description,
-
campaign_type: campaign.campaign_type,
-
industry: campaign.industry,
-
status: campaign.status,
-
start_date: campaign.start_date,
-
end_date: campaign.end_date,
-
budget: campaign.budget,
-
goals: campaign.goals,
-
target_metrics: campaign.target_metrics,
-
settings: campaign.settings,
-
persona: campaign.persona ? serialize_persona_for_campaign(campaign.persona) : nil,
-
journey_count: campaign.journeys.count,
-
created_at: campaign.created_at,
-
updated_at: campaign.updated_at
-
}
-
end
-
-
# Minimal persona payload embedded in campaign detail responses; every
# key maps 1:1 to a persona attribute of the same name.
def serialize_persona_for_campaign(persona)
  %i[id name age_range location demographic_data psychographic_data]
    .to_h { |attr| [attr, persona.public_send(attr)] }
end
-
-
# Journey payload as embedded in campaign responses. step_count and
# performance_score are derived from total_steps and
# latest_performance_score respectively.
def serialize_journey_for_campaign(journey)
  base = {
    id: journey.id,
    name: journey.name,
    description: journey.description,
    status: journey.status
  }

  base.merge(
    step_count: journey.total_steps,
    performance_score: journey.latest_performance_score,
    created_at: journey.created_at,
    updated_at: journey.updated_at
  )
end
-
end
-
class Api::V1::JourneyStepsController < Api::V1::BaseController
-
before_action :set_journey
-
before_action :set_step, only: [:show, :update, :destroy, :reorder, :duplicate, :execute]
-
-
# GET /api/v1/journeys/:journey_id/steps
-
def index
-
steps = @journey.journey_steps.includes(:transitions_from, :transitions_to)
-
-
# Apply filters
-
steps = steps.where(stage: params[:stage]) if params[:stage].present?
-
steps = steps.where(step_type: params[:step_type]) if params[:step_type].present?
-
steps = steps.where(status: params[:status]) if params[:status].present?
-
-
# Apply sorting
-
case params[:sort_by]
-
when 'position'
-
steps = steps.order(:position)
-
when 'stage'
-
steps = steps.order(:stage, :position)
-
when 'created_at'
-
steps = steps.order(:created_at)
-
else
-
steps = steps.order(:position)
-
end
-
-
paginate_and_render(steps, serializer: method(:serialize_step_summary))
-
end
-
-
# GET /api/v1/journeys/:journey_id/steps/:id
-
def show
-
render_success(data: serialize_step_detail(@step))
-
end
-
-
# POST /api/v1/journeys/:journey_id/steps
-
def create
-
step = @journey.journey_steps.build(step_params)
-
-
# Set position if not provided
-
if step.position.nil?
-
max_position = @journey.journey_steps.maximum(:position) || 0
-
step.position = max_position + 1
-
end
-
-
if step.save
-
render_success(
-
data: serialize_step_detail(step),
-
message: 'Step created successfully',
-
status: :created
-
)
-
else
-
render_error(
-
message: 'Failed to create step',
-
errors: step.errors.as_json
-
)
-
end
-
end
-
-
# PUT /api/v1/journeys/:journey_id/steps/:id
-
def update
-
if @step.update(step_params)
-
render_success(
-
data: serialize_step_detail(@step),
-
message: 'Step updated successfully'
-
)
-
else
-
render_error(
-
message: 'Failed to update step',
-
errors: @step.errors.as_json
-
)
-
end
-
end
-
-
# DELETE /api/v1/journeys/:journey_id/steps/:id
-
def destroy
-
@step.destroy!
-
render_success(message: 'Step deleted successfully')
-
end
-
-
# PATCH /api/v1/journeys/:journey_id/steps/:id/reorder
-
# PATCH /api/v1/journeys/:journey_id/steps/:id/reorder
# Moves the step to the 1-based position given in params[:position].
# Non-positive values are rejected (a non-numeric string coerces to 0 via
# to_i and is therefore rejected too).
# NOTE(review): the new position is written directly without shifting
# sibling steps, so two steps can end up sharing a position — confirm
# whether the model reorders siblings via callbacks.
def reorder
  new_position = params[:position].to_i

  if new_position > 0
    @step.update!(position: new_position)
    render_success(
      data: serialize_step_detail(@step),
      message: 'Step reordered successfully'
    )
  else
    render_error(message: 'Invalid position')
  end
end
-
-
# POST /api/v1/journeys/:journey_id/steps/:id/duplicate
-
# Copies the current step within its journey, appending the copy at the
# end of the position ordering and suffixing its name with " (Copy)".
# Any failure (validation or otherwise) is reported as a render_error.
def duplicate
  copy = @step.dup
  copy.name = "#{@step.name} (Copy)"

  # Place the copy after the journey's current last step.
  copy.position = (@journey.journey_steps.maximum(:position) || 0) + 1
  copy.save!

  render_success(
    data: serialize_step_detail(copy),
    message: 'Step duplicated successfully',
    status: :created
  )
rescue => e
  render_error(message: "Failed to duplicate step: #{e.message}")
end
-
-
# POST /api/v1/journeys/:journey_id/steps/:id/execute
-
def execute
-
execution_params = params.permit(:user_data, metadata: {})
-
-
begin
-
# This would integrate with the journey execution engine
-
execution_result = execute_step(@step, execution_params)
-
-
render_success(
-
data: execution_result,
-
message: 'Step executed successfully'
-
)
-
rescue => e
-
render_error(message: "Failed to execute step: #{e.message}")
-
end
-
end
-
-
# GET /api/v1/journeys/:journey_id/steps/:id/transitions
-
def transitions
-
transitions_from = @step.transitions_from.includes(:to_step)
-
transitions_to = @step.transitions_to.includes(:from_step)
-
-
transitions_data = {
-
outgoing: transitions_from.map { |t| serialize_transition(t) },
-
incoming: transitions_to.map { |t| serialize_transition(t) }
-
}
-
-
render_success(data: transitions_data)
-
end
-
-
# POST /api/v1/journeys/:journey_id/steps/:id/transitions
-
def create_transition
-
transition_params = params.require(:transition).permit(:to_step_id, :condition_type, :condition_data, :weight, metadata: {})
-
-
to_step = @journey.journey_steps.find(transition_params[:to_step_id])
-
-
transition = @step.transitions_from.build(transition_params.merge(to_step: to_step))
-
-
if transition.save
-
render_success(
-
data: serialize_transition(transition),
-
message: 'Transition created successfully',
-
status: :created
-
)
-
else
-
render_error(
-
message: 'Failed to create transition',
-
errors: transition.errors.as_json
-
)
-
end
-
end
-
-
# GET /api/v1/journeys/:journey_id/steps/:id/analytics
-
def analytics
-
days = [params[:days].to_i, 1].max
-
days = [days, 365].min
-
-
# Get step execution analytics
-
executions = @step.step_executions
-
.where(created_at: days.days.ago..Time.current)
-
.includes(:journey_execution)
-
-
analytics_data = {
-
execution_count: executions.count,
-
completion_rate: calculate_step_completion_rate(executions),
-
average_duration: calculate_average_duration(executions),
-
success_rate: calculate_step_success_rate(executions),
-
conversion_metrics: calculate_step_conversions(executions),
-
engagement_metrics: calculate_step_engagement(executions)
-
}
-
-
render_success(data: analytics_data)
-
end
-
-
private
-
-
def set_journey
-
@journey = current_user.journeys.find(params[:journey_id])
-
end
-
-
def set_step
-
@step = @journey.journey_steps.find(params[:id])
-
end
-
-
def step_params
-
params.require(:step).permit(
-
:name, :description, :step_type, :stage, :position, :timing,
-
:status, :trigger_conditions, :success_criteria,
-
content: {}, metadata: {}, settings: {}
-
)
-
end
-
-
# Attributes exposed by the list serializer; each maps 1:1 to a step
# attribute of the same name.
STEP_SUMMARY_FIELDS = %i[id name description step_type stage position status timing created_at updated_at].freeze

# Compact step payload for list endpoints.
def serialize_step_summary(step)
  STEP_SUMMARY_FIELDS.to_h { |field| [field, step.public_send(field)] }
end
-
-
def serialize_step_detail(step)
-
{
-
id: step.id,
-
journey_id: step.journey_id,
-
name: step.name,
-
description: step.description,
-
step_type: step.step_type,
-
stage: step.stage,
-
position: step.position,
-
timing: step.timing,
-
status: step.status,
-
trigger_conditions: step.trigger_conditions,
-
success_criteria: step.success_criteria,
-
content: step.content,
-
metadata: step.metadata,
-
settings: step.settings,
-
created_at: step.created_at,
-
updated_at: step.updated_at,
-
transitions_count: {
-
outgoing: step.transitions_from.count,
-
incoming: step.transitions_to.count
-
}
-
}
-
end
-
-
# Public payload for a step transition, including the names of both
# endpoint steps for display purposes.
def serialize_transition(transition)
  source = transition.from_step
  target = transition.to_step

  {
    id: transition.id,
    from_step_id: transition.from_step_id,
    to_step_id: transition.to_step_id,
    from_step_name: source.name,
    to_step_name: target.name,
    condition_type: transition.condition_type,
    condition_data: transition.condition_data,
    weight: transition.weight,
    metadata: transition.metadata,
    created_at: transition.created_at
  }
end
-
-
# Placeholder for step execution — no real work is performed yet; this
# will integrate with the journey execution engine once it exists.
# Returns a stub result hash with a fresh execution UUID and the caller's
# metadata echoed back (defaulting to {}).
def execute_step(step, execution_params)
  {
    step_id: step.id,
    execution_id: SecureRandom.uuid,
    status: 'executed',
    executed_at: Time.current,
    result: 'success',
    metadata: execution_params[:metadata] || {}
  }
end
-
-
# Percentage (rounded to 2 dp) of executions whose status is 'completed'.
# Returns 0.0 for an empty collection to avoid division by zero.
def calculate_step_completion_rate(executions)
  total = executions.count
  return 0.0 if total.zero?

  completed = executions.count { |execution| execution.status == 'completed' }
  (completed.to_f / total * 100).round(2)
end
-
-
# Mean duration (whole seconds per execution, mean rounded to 2 dp) over
# executions that have both started_at and completed_at; executions
# missing either timestamp are ignored. Returns integer 0 when nothing
# qualifies.
def calculate_average_duration(executions)
  durations = executions.filter_map do |execution|
    started = execution.started_at
    finished = execution.completed_at
    (finished - started).to_i if started && finished
  end

  return 0 if durations.empty?

  (durations.sum.to_f / durations.count).round(2)
end
-
-
# Statuses counted as successful outcomes.
STEP_SUCCESS_STATUSES = %w[completed success].freeze

# Percentage (rounded to 2 dp) of executions that ended in a successful
# status. Returns 0.0 for an empty collection.
def calculate_step_success_rate(executions)
  total = executions.count
  return 0.0 if total.zero?

  successes = executions.count { |execution| STEP_SUCCESS_STATUSES.include?(execution.status) }
  (successes.to_f / total * 100).round(2)
end
-
-
# Placeholder until real conversion tracking lands: always reports zero
# conversions regardless of the executions passed in.
def calculate_step_conversions(_executions)
  {
    total_conversions: 0,
    conversion_rate: 0.0,
    conversion_value: 0.0
  }
end
-
-
# Placeholder until real engagement tracking lands: always reports zeroed
# engagement metrics regardless of the executions passed in.
def calculate_step_engagement(_executions)
  {
    engagement_score: 0.0,
    interaction_count: 0,
    average_time_spent: 0.0
  }
end
-
end
-
class Api::V1::JourneySuggestionsController < Api::V1::BaseController
-
-
def index
-
suggestions = generate_suggestions_for_journey
-
render_success(data: { suggestions: suggestions })
-
end
-
-
def for_stage
-
stage = params[:stage]
-
-
unless Journey::STAGES.include?(stage)
-
return render_error(message: 'Invalid stage specified', code: 'INVALID_STAGE')
-
end
-
-
suggestions = generate_suggestions_for_stage(stage)
-
render_success(data: { suggestions: suggestions })
-
end
-
-
def for_step
-
step_data = params.permit(:type, :stage, :previous_steps => [], :journey_context => {})
-
suggestions = generate_suggestions_for_step(step_data)
-
render_success(data: { suggestions: suggestions })
-
end
-
-
# Returns up to `count` canned suggestions per requested stage. Stages
# outside Journey::STAGES are silently skipped; when no stages param is
# given, all stages are used. Journey context is included only when a
# valid journey_id belonging to the current user is supplied.
# NOTE(review): [count, 3].max makes 3 the *minimum* per stage (a request
# for 1 or 2 returns 3) — the inline comment only mentions the cap of 10.
# Confirm whether the floor of 3 is intentional or meant as a default.
def bulk_suggestions
  request_params = params.permit(:journey_id, :count, stages: [], context: {})

  journey = current_user.journeys.find(request_params[:journey_id]) if request_params[:journey_id]
  stages = request_params[:stages] || Journey::STAGES
  count_per_stage = [request_params[:count].to_i, 3].max
  count_per_stage = [count_per_stage, 10].min # Cap at 10 per stage

  bulk_suggestions = {}

  stages.each do |stage|
    next unless Journey::STAGES.include?(stage)

    suggestions = generate_suggestions_for_stage(stage)
    bulk_suggestions[stage] = suggestions.take(count_per_stage)
  end

  render_success(
    data: {
      bulk_suggestions: bulk_suggestions,
      journey_context: journey ? serialize_journey_context(journey) : nil
    }
  )
end
-
-
def personalized_suggestions
-
persona_id = params[:persona_id]
-
campaign_id = params[:campaign_id]
-
journey_id = params[:journey_id]
-
-
context = build_personalization_context(persona_id, campaign_id, journey_id)
-
suggestions = generate_personalized_suggestions(context)
-
-
render_success(
-
data: {
-
suggestions: suggestions,
-
personalization_context: context
-
}
-
)
-
end
-
-
def create_feedback
-
feedback_params = params.permit(:suggestion_id, :feedback_type, :rating, :comment, :journey_id, :step_id)
-
-
begin
-
feedback = current_user.suggestion_feedbacks.create!(
-
suggestion_id: feedback_params[:suggestion_id],
-
feedback_type: feedback_params[:feedback_type],
-
rating: feedback_params[:rating],
-
comment: feedback_params[:comment],
-
journey_id: feedback_params[:journey_id],
-
metadata: {
-
step_id: feedback_params[:step_id],
-
created_via_api: true,
-
user_agent: request.user_agent
-
}
-
)
-
-
render_success(
-
data: serialize_feedback(feedback),
-
message: 'Feedback recorded successfully'
-
)
-
rescue => e
-
render_error(message: "Failed to record feedback: #{e.message}")
-
end
-
end
-
-
def feedback_analytics
-
# Get feedback analytics for improving suggestions
-
days = [params[:days].to_i, 30].max
-
days = [days, 365].min
-
-
start_date = days.days.ago
-
feedbacks = current_user.suggestion_feedbacks.where(created_at: start_date..)
-
-
analytics = {
-
total_feedback_count: feedbacks.count,
-
average_rating: feedbacks.average(:rating)&.round(2) || 0,
-
feedback_by_type: feedbacks.group(:feedback_type).count,
-
rating_distribution: feedbacks.group(:rating).count,
-
top_suggestions: find_top_rated_suggestions(feedbacks),
-
improvement_areas: identify_improvement_areas(feedbacks)
-
}
-
-
render_success(data: analytics)
-
end
-
-
# Reports historical suggestion usage for the requesting user.
# NOTE(review): currently a stub — journey_id and the clamped days window
# (30..90) are computed but unused, and every metric is a hard-coded zero
# pending a real suggestion-tracking implementation.
def suggestion_history
  journey_id = params[:journey_id]
  days = [params[:days].to_i, 30].max
  days = [days, 90].min

  history_data = {
    suggestions_generated: 0,
    suggestions_used: 0,
    user_satisfaction: 0.0,
    popular_suggestion_types: [],
    trend_analysis: {}
  }

  render_success(data: history_data)
end
-
-
def refresh_cache
-
# Clear and refresh suggestion caches
-
# This would integrate with the caching system
-
-
render_success(message: 'Suggestion cache refreshed successfully')
-
end
-
-
private
-
-
def generate_suggestions_for_journey
-
# Generate general journey suggestions based on user context
-
[
-
{
-
id: 'welcome-email-001',
-
type: 'step',
-
title: 'Welcome Email Sequence',
-
description: 'Start with a personalized welcome email to introduce your brand',
-
confidence: 0.95,
-
data: {
-
step_type: 'email_sequence',
-
stage: 'awareness',
-
timing: 'immediate',
-
subject: 'Welcome to [Brand Name]!',
-
template: 'welcome'
-
}
-
},
-
{
-
id: 'social-proof-002',
-
type: 'step',
-
title: 'Social Media Engagement',
-
description: 'Share customer testimonials on social media',
-
confidence: 0.88,
-
data: {
-
step_type: 'social_media',
-
stage: 'consideration',
-
timing: '3_days',
-
channel: 'facebook'
-
}
-
},
-
{
-
id: 'nurture-sequence-003',
-
type: 'step',
-
title: 'Educational Content Series',
-
description: 'Provide valuable content to nurture leads',
-
confidence: 0.92,
-
data: {
-
step_type: 'blog_post',
-
stage: 'consideration',
-
timing: '1_week'
-
}
-
}
-
]
-
end
-
-
# Canned step suggestions per stage. Each entry is:
#   [id suffix, title, description, confidence, step_type, timing]
# The full suggestion id is "#{stage}-#{suffix}".
STAGE_SUGGESTION_SPECS = {
  'awareness' => [
    ['blog-001', 'Educational Blog Post', 'Create content that addresses common pain points', 0.90, 'blog_post', 'immediate'],
    ['social-001', 'Social Media Campaign', 'Reach new audiences through targeted social content', 0.85, 'social_media', 'immediate'],
    ['lead-magnet-001', 'Lead Magnet', 'Offer valuable resource to capture leads', 0.93, 'lead_magnet', 'immediate']
  ],
  'consideration' => [
    ['email-sequence-001', 'Nurture Email Sequence', 'Build relationships with educational content', 0.95, 'email_sequence', '1_day'],
    ['webinar-001', 'Educational Webinar', 'Demonstrate expertise and build trust', 0.88, 'webinar', '1_week'],
    ['case-study-001', 'Customer Case Study', 'Show real results and social proof', 0.91, 'case_study', '3_days']
  ],
  'conversion' => [
    ['sales-call-001', 'Consultation Call', 'Personal conversation to address specific needs', 0.97, 'sales_call', '1_day'],
    ['demo-001', 'Product Demonstration', 'Show how your solution solves their problems', 0.92, 'demo', 'immediate'],
    ['trial-001', 'Free Trial Offer', 'Let prospects experience your product risk-free', 0.89, 'trial_offer', 'immediate']
  ],
  'retention' => [
    ['onboarding-001', 'Customer Onboarding', 'Ensure new customers get maximum value', 0.98, 'onboarding', 'immediate'],
    ['newsletter-001', 'Regular Newsletter', 'Keep customers engaged with updates and tips', 0.86, 'newsletter', '1_week'],
    ['feedback-001', 'Feedback Survey', 'Gather insights to improve customer experience', 0.82, 'feedback_survey', '2_weeks']
  ]
}.freeze

# Returns the canned step suggestions for a journey stage ([] for unknown
# stages). The previous version rebuilt one large literal hash covering
# all four stages on every call; the data now lives in
# STAGE_SUGGESTION_SPECS and only the requested stage is materialized.
# Fresh hashes are built on each call because some callers mutate the
# suggestion payloads in place.
def generate_suggestions_for_stage(stage)
  specs = STAGE_SUGGESTION_SPECS[stage] || []

  specs.map do |suffix, title, description, confidence, step_type, timing|
    {
      id: "#{stage}-#{suffix}",
      type: 'step',
      title: title,
      description: description,
      confidence: confidence,
      data: {
        step_type: step_type,
        stage: stage,
        timing: timing
      }
    }
  end
end
-
-
# Suggests logical follow-up steps for the step described by step_data
# (keys: :type, :stage, :previous_steps, :journey_context). Only :type
# and :stage currently influence the result; unknown types yield [].
# NOTE(review): the original assigned step_data[:previous_steps] to a
# local that was never read — the "analyze previous steps" behavior was
# never implemented. The dead assignment is removed; :previous_steps is
# still accepted in the payload for future use.
def generate_suggestions_for_step(step_data)
  case step_data[:type]
  when 'lead_magnet'
    [{
      id: 'follow-up-email-001',
      type: 'connection',
      title: 'Follow-up Email',
      description: 'Send a thank you email with additional resources',
      confidence: 0.95,
      data: {
        step_type: 'email_sequence',
        stage: 'consideration',
        timing: '1_day',
        subject: 'Thank you for downloading [Resource Name]'
      }
    }]
  when 'email_sequence'
    [{
      id: 'social-engagement-001',
      type: 'connection',
      title: 'Social Media Follow-up',
      description: 'Engage prospects on social media',
      confidence: 0.85,
      data: {
        step_type: 'social_media',
        stage: step_data[:stage],
        timing: '2_days'
      }
    }]
  when 'webinar'
    [{
      id: 'sales-call-follow-001',
      type: 'connection',
      title: 'Sales Call',
      description: 'Schedule a call with interested attendees',
      confidence: 0.92,
      data: {
        step_type: 'sales_call',
        stage: 'conversion',
        timing: '1_day'
      }
    }]
  else
    []
  end
end
-
-
def serialize_journey_context(journey)
-
{
-
id: journey.id,
-
name: journey.name,
-
campaign_type: journey.campaign_type,
-
target_audience: journey.target_audience,
-
step_count: journey.total_steps,
-
stages_used: journey.steps_by_stage.keys
-
}
-
end
-
-
def build_personalization_context(persona_id, campaign_id, journey_id)
-
context = {}
-
-
if persona_id.present?
-
persona = current_user.personas.find_by(id: persona_id)
-
context[:persona] = persona.to_campaign_context if persona
-
end
-
-
if campaign_id.present?
-
campaign = current_user.campaigns.find_by(id: campaign_id)
-
context[:campaign] = campaign.to_analytics_context if campaign
-
end
-
-
if journey_id.present?
-
journey = current_user.journeys.find_by(id: journey_id)
-
context[:journey] = serialize_journey_context(journey) if journey
-
end
-
-
context
-
end
-
-
def generate_personalized_suggestions(context)
-
# Enhanced suggestions based on persona, campaign, and journey context
-
base_suggestions = generate_suggestions_for_journey
-
-
# Customize suggestions based on context
-
if context[:persona]
-
base_suggestions = filter_suggestions_by_persona(base_suggestions, context[:persona])
-
end
-
-
if context[:campaign]
-
base_suggestions = enhance_suggestions_with_campaign_data(base_suggestions, context[:campaign])
-
end
-
-
base_suggestions
-
end
-
-
# Adjusts suggestion confidence in place based on persona fit and returns
# the (same) suggestion objects. The only rule so far: boost social-media
# steps by 10% (capped at 1.0) for the 25-35 age range. Despite the
# name, nothing is filtered out yet.
def filter_suggestions_by_persona(suggestions, persona_context)
  young_adult = persona_context[:age_range] == '25-35'

  suggestions.map do |suggestion|
    if young_adult && suggestion[:data][:step_type] == 'social_media'
      suggestion[:confidence] = [suggestion[:confidence] * 1.1, 1.0].min
    end

    suggestion
  end
end
-
-
# Annotates each suggestion (in place) with the campaign's type and
# industry under data[:campaign_context], then returns the suggestions.
# A fresh context hash is built per suggestion so payloads stay
# independently mutable.
def enhance_suggestions_with_campaign_data(suggestions, campaign_context)
  suggestions.each do |suggestion|
    suggestion[:data][:campaign_context] = {
      campaign_type: campaign_context[:campaign_type],
      industry: campaign_context[:industry]
    }
  end
end
-
-
# Attributes exposed for a suggestion-feedback record; each maps 1:1 to
# an attribute of the same name.
FEEDBACK_FIELDS = %i[id suggestion_id feedback_type rating comment journey_id created_at].freeze

# Public representation of a suggestion-feedback record.
def serialize_feedback(feedback)
  FEEDBACK_FIELDS.to_h { |field| [field, feedback.public_send(field)] }
end
-
-
def find_top_rated_suggestions(feedbacks)
-
feedbacks.group(:suggestion_id)
-
.average(:rating)
-
.sort_by { |_, rating| -rating }
-
.first(5)
-
.map { |suggestion_id, rating| { suggestion_id: suggestion_id, rating: rating.round(2) } }
-
end
-
-
# Flags feedback categories where more than 30% of low ratings (< 3)
# cite that category, signalling where suggestion generation should
# improve. Returns an array of human-readable area labels.
def identify_improvement_areas(feedbacks)
  low_rated = feedbacks.where('rating < ?', 3)
  threshold = low_rated.count * 0.3

  {
    'relevance' => 'Suggestion relevance',
    'quality' => 'Suggestion quality',
    'difficulty' => 'Implementation difficulty'
  }.filter_map do |feedback_type, label|
    label if low_rated.where(feedback_type: feedback_type).count > threshold
  end
end
-
end
-
class Api::V1::JourneyTemplatesController < Api::V1::BaseController
-
before_action :set_template, only: [:show, :instantiate, :update, :destroy]
-
-
# GET /api/v1/templates
-
def index
-
templates = JourneyTemplate.published.includes(:user)
-
-
# Apply filters
-
templates = templates.where(category: params[:category]) if params[:category].present?
-
templates = templates.where(industry: params[:industry]) if params[:industry].present?
-
templates = templates.where('name ILIKE ? OR description ILIKE ?', "%#{params[:search]}%", "%#{params[:search]}%") if params[:search].present?
-
-
# Filter by template type
-
if params[:template_type].present?
-
templates = templates.where("metadata ->> 'template_type' = ?", params[:template_type])
-
end
-
-
# Filter by difficulty level
-
if params[:difficulty].present?
-
templates = templates.where("metadata ->> 'difficulty' = ?", params[:difficulty])
-
end
-
-
# Apply sorting
-
case params[:sort_by]
-
when 'name'
-
templates = templates.order(:name)
-
when 'category'
-
templates = templates.order(:category, :name)
-
when 'popularity'
-
templates = templates.order(usage_count: :desc, name: :asc)
-
when 'rating'
-
templates = templates.order('metadata->>\'rating\' DESC NULLS LAST', :name)
-
when 'created_at'
-
templates = templates.order(:created_at)
-
else
-
templates = templates.order(:name)
-
end
-
-
paginate_and_render(templates, serializer: method(:serialize_template_summary))
-
end
-
-
# GET /api/v1/templates/:id
-
def show
-
render_success(data: serialize_template_detail(@template))
-
end
-
-
# POST /api/v1/templates
-
def create
-
template = current_user.journey_templates.build(template_params)
-
-
if template.save
-
render_success(
-
data: serialize_template_detail(template),
-
message: 'Template created successfully',
-
status: :created
-
)
-
else
-
render_error(
-
message: 'Failed to create template',
-
errors: template.errors.as_json
-
)
-
end
-
end
-
-
# PUT /api/v1/templates/:id
-
def update
-
# Only allow template owner to update
-
unless @template.user == current_user
-
return render_error(message: 'Access denied', status: :forbidden)
-
end
-
-
if @template.update(template_params)
-
render_success(
-
data: serialize_template_detail(@template),
-
message: 'Template updated successfully'
-
)
-
else
-
render_error(
-
message: 'Failed to update template',
-
errors: @template.errors.as_json
-
)
-
end
-
end
-
-
# DELETE /api/v1/templates/:id
-
def destroy
-
# Only allow template owner to delete
-
unless @template.user == current_user
-
return render_error(message: 'Access denied', status: :forbidden)
-
end
-
-
@template.destroy!
-
render_success(message: 'Template deleted successfully')
-
end
-
-
# POST /api/v1/templates/:id/instantiate
-
def instantiate
-
instantiation_params = params.permit(:name, :description, :campaign_id, customizations: {})
-
-
begin
-
journey = @template.instantiate_for_user(current_user, instantiation_params)
-
-
# Increment usage count
-
@template.increment!(:usage_count)
-
-
render_success(
-
data: serialize_instantiated_journey(journey),
-
message: 'Template instantiated successfully',
-
status: :created
-
)
-
rescue => e
-
render_error(message: "Failed to instantiate template: #{e.message}")
-
end
-
end
-
-
# POST /api/v1/templates/:id/clone
-
# POST /api/v1/templates/:id/clone
#
# Copies this template into the current user's library as a private
# draft with a reset usage counter.
def clone
  copy = @template.dup
  copy.user = current_user
  copy.name = "#{@template.name} (Copy)"
  copy.is_public = false
  copy.status = 'draft'
  copy.usage_count = 0
  copy.save!

  render_success(
    data: serialize_template_detail(copy),
    message: 'Template cloned successfully',
    status: :created
  )
rescue => e
  render_error(message: "Failed to clone template: #{e.message}")
end
-
-
# GET /api/v1/templates/categories
-
# GET /api/v1/templates/categories
#
# Distinct, sorted category names across published templates.
def categories
  render_success(data: JourneyTemplate.published.distinct.pluck(:category).compact.sort)
end
-
-
# GET /api/v1/templates/industries
-
# GET /api/v1/templates/industries
#
# Distinct, sorted industry names across published templates.
def industries
  render_success(data: JourneyTemplate.published.distinct.pluck(:industry).compact.sort)
end
-
-
# GET /api/v1/templates/popular
-
# GET /api/v1/templates/popular
#
# Most-used published templates, ordered by usage count (ties broken
# by name). The `limit` param defaults to 10 when absent and is
# clamped to 1..50. (Previously a missing param collapsed to a limit
# of 1 because nil.to_i is 0 and the floor was 1.)
def popular
  requested = params[:limit].present? ? params[:limit].to_i : 10
  limit = requested.clamp(1, 50) # Cap at 50

  templates = JourneyTemplate.published
                             .order(usage_count: :desc, name: :asc)
                             .limit(limit)

  render_success(data: templates.map { |t| serialize_template_summary(t) })
end
-
-
# GET /api/v1/templates/recommended
-
# GET /api/v1/templates/recommended
#
# Basic recommendations based on the campaign types and industries the
# user has already worked with, falling back to the most popular
# templates when nothing matches. `limit` defaults to 10, clamped to
# 1..20.
def recommended
  user_campaign_types = current_user.journeys.distinct.pluck(:campaign_type).compact
  user_industries = current_user.journeys.joins(:campaign).distinct.pluck('campaigns.industry').compact

  recommendations = JourneyTemplate.published

  if user_campaign_types.any?
    # A literal "?|" inside a positional-bind SQL fragment is itself
    # consumed as a bind marker by ActiveRecord (and "->>" yields text,
    # not jsonb), so use the function form of the jsonb
    # "exists any key" operator instead.
    recommendations = recommendations.where(
      "jsonb_exists_any(metadata -> 'recommended_for', ARRAY[?])",
      user_campaign_types
    )
  end

  if user_industries.any?
    recommendations = recommendations.where(industry: user_industries)
  end

  # Fallback to popular templates if no specific recommendations
  if recommendations.empty?
    recommendations = JourneyTemplate.published.order(usage_count: :desc)
  end

  # Previously the floor of 10 made it impossible to request fewer
  # than 10 results; treat the param as an explicit value with a
  # default instead.
  requested = params[:limit].present? ? params[:limit].to_i : 10
  limit = requested.clamp(1, 20)

  render_success(
    data: recommendations.limit(limit).map { |t| serialize_template_summary(t) }
  )
end
-
-
# POST /api/v1/templates/:id/rate
-
# POST /api/v1/templates/:id/rate
#
# Records a 1..5 rating (with optional comment) in the template's
# metadata and refreshes the cached average.
def rate
  rating = params[:rating].to_f
  comment = params[:comment]

  unless (1..5).include?(rating)
    return render_error(message: 'Rating must be between 1 and 5')
  end

  # metadata is persisted as JSON: entries read back from the DB have
  # string keys, so the new entry must be string-keyed too — the old
  # symbol-keyed hash made r['rating'] nil for the fresh entry and the
  # average calculation raised.
  @template.metadata ||= {}
  ratings = @template.metadata['ratings'] || []
  ratings << {
    'user_id' => current_user.id,
    'rating' => rating,
    'comment' => comment,
    'created_at' => Time.current
  }

  @template.metadata['ratings'] = ratings

  # Calculate average rating (to_f guards any legacy nil entries)
  avg_rating = ratings.sum { |r| r['rating'].to_f } / ratings.count
  @template.metadata['rating'] = avg_rating.round(2)

  @template.save!

  render_success(
    data: { rating: avg_rating, total_ratings: ratings.count },
    message: 'Rating submitted successfully'
  )
end
-
-
private
-
-
# Loads the template for member actions.
# NOTE(review): deliberately not scoped to current_user — presumably so
# public templates remain reachable; ownership is enforced per-action
# (update/destroy). Confirm against the authorization requirements.
def set_template
  @template = JourneyTemplate.find(params[:id])
end
-
-
# Strong parameters for template create/update. steps_template accepts
# an array, metadata an arbitrary nested hash.
def template_params
  params.require(:template).permit(
    :name, :description, :category, :industry, :is_public, :status,
    steps_template: [], metadata: {}
  )
end
-
-
# Compact representation of a template for list endpoints.
# Pulls cached rating info out of the metadata JSON blob.
def serialize_template_summary(template)
  metadata = template.metadata
  ratings = metadata['ratings'] || []
  steps = template.steps_template || []

  {
    id: template.id,
    name: template.name,
    description: template.description,
    category: template.category,
    industry: template.industry,
    author: template.user.name,
    usage_count: template.usage_count,
    rating: metadata['rating'],
    total_ratings: ratings.count,
    difficulty: metadata['difficulty'],
    estimated_duration: metadata['estimated_duration'],
    step_count: steps.count,
    created_at: template.created_at,
    updated_at: template.updated_at
  }
end
-
-
# Full representation of a template for show/create/update responses,
# including the raw steps_template and metadata payloads.
def serialize_template_detail(template)
  metadata = template.metadata
  owner = template.user
  author_summary = { id: owner.id, name: owner.name }

  {
    id: template.id,
    name: template.name,
    description: template.description,
    category: template.category,
    industry: template.industry,
    is_public: template.is_public,
    status: template.status,
    author: author_summary,
    usage_count: template.usage_count,
    rating: metadata['rating'],
    total_ratings: (metadata['ratings'] || []).count,
    steps_template: template.steps_template,
    metadata: metadata,
    version: template.version,
    created_at: template.created_at,
    updated_at: template.updated_at
  }
end
-
-
# Minimal payload describing a journey freshly created from a template;
# the source template id is read back out of the journey's metadata.
def serialize_instantiated_journey(journey)
  source_template_id = journey.metadata['template_id']

  {
    id: journey.id,
    name: journey.name,
    description: journey.description,
    status: journey.status,
    template_id: source_template_id,
    created_at: journey.created_at
  }
end
-
end
-
# REST API for the current user's journeys: CRUD plus lifecycle
# transitions (publish/archive), duplication, analytics and execution
# status reporting.
class Api::V1::JourneysController < Api::V1::BaseController
  before_action :set_journey, only: [:show, :update, :destroy, :duplicate, :publish, :archive, :analytics, :execution_status]

  # GET /api/v1/journeys
  #
  # Lists the current user's journeys with optional status /
  # campaign_type / campaign_id filters and whitelisted sorting
  # (defaults to most recently updated first).
  def index
    journeys = current_user.journeys.includes(:campaign, :persona, :journey_steps)

    # Apply filters
    journeys = journeys.where(status: params[:status]) if params[:status].present?
    journeys = journeys.where(campaign_type: params[:campaign_type]) if params[:campaign_type].present?
    journeys = journeys.joins(:campaign).where(campaigns: { id: params[:campaign_id] }) if params[:campaign_id].present?

    # Apply sorting — whitelist only; params[:sort_by] is never
    # interpolated into SQL
    case params[:sort_by]
    when 'name'
      journeys = journeys.order(:name)
    when 'created_at'
      journeys = journeys.order(:created_at)
    when 'updated_at'
      journeys = journeys.order(:updated_at)
    when 'status'
      journeys = journeys.order(:status)
    else
      journeys = journeys.order(updated_at: :desc)
    end

    paginate_and_render(journeys, serializer: method(:serialize_journey_summary))
  end

  # GET /api/v1/journeys/:id
  def show
    render_success(data: serialize_journey_detail(@journey))
  end

  # POST /api/v1/journeys
  def create
    journey = current_user.journeys.build(journey_params)

    if journey.save
      render_success(
        data: serialize_journey_detail(journey),
        message: 'Journey created successfully',
        status: :created
      )
    else
      render_error(
        message: 'Failed to create journey',
        errors: journey.errors.as_json
      )
    end
  end

  # PUT /api/v1/journeys/:id
  def update
    if @journey.update(journey_params)
      render_success(
        data: serialize_journey_detail(@journey),
        message: 'Journey updated successfully'
      )
    else
      render_error(
        message: 'Failed to update journey',
        errors: @journey.errors.as_json
      )
    end
  end

  # DELETE /api/v1/journeys/:id
  def destroy
    @journey.destroy!
    render_success(message: 'Journey deleted successfully')
  end

  # POST /api/v1/journeys/:id/duplicate
  #
  # Duplication is delegated to the model; any failure is reported as
  # an error payload rather than raised.
  def duplicate
    begin
      new_journey = @journey.duplicate
      render_success(
        data: serialize_journey_detail(new_journey),
        message: 'Journey duplicated successfully',
        status: :created
      )
    rescue => e
      render_error(message: "Failed to duplicate journey: #{e.message}")
    end
  end

  # POST /api/v1/journeys/:id/publish
  def publish
    if @journey.publish!
      render_success(
        data: serialize_journey_detail(@journey),
        message: 'Journey published successfully'
      )
    else
      render_error(
        message: 'Failed to publish journey',
        errors: @journey.errors.as_json
      )
    end
  end

  # POST /api/v1/journeys/:id/archive
  def archive
    if @journey.archive!
      render_success(
        data: serialize_journey_detail(@journey),
        message: 'Journey archived successfully'
      )
    else
      render_error(
        message: 'Failed to archive journey',
        errors: @journey.errors.as_json
      )
    end
  end

  # GET /api/v1/journeys/:id/analytics
  #
  # Aggregated analytics for the journey. `days` defaults to 30 when
  # absent and is clamped to 1..365. (Previously an absent param
  # collapsed to a 1-day window, inconsistent with the personas API.)
  def analytics
    days = params[:days].present? ? params[:days].to_i : 30
    days = days.clamp(1, 365) # Cap at 1 year

    analytics_data = {
      summary: @journey.analytics_summary(days),
      performance_score: @journey.latest_performance_score,
      funnel_performance: @journey.funnel_performance('default', days),
      trends: @journey.performance_trends(7),
      ab_test_status: @journey.ab_test_status
    }

    render_success(data: analytics_data)
  end

  # GET /api/v1/journeys/:id/execution_status
  #
  # Recent executions with per-step progress. `limit` defaults to 10
  # and is clamped to 1..100. (Previously `params[:limit]&.to_i || 10`
  # produced a limit of 0 for non-numeric input and was unbounded
  # above.)
  def execution_status
    limit = params[:limit].present? ? params[:limit].to_i : 10
    limit = limit.clamp(1, 100)

    executions = @journey.journey_executions
                         .includes(:step_executions)
                         .order(created_at: :desc)
                         .limit(limit)

    execution_data = executions.map do |execution|
      {
        id: execution.id,
        status: execution.status,
        started_at: execution.started_at,
        completed_at: execution.completed_at,
        current_step_id: execution.current_step_id,
        step_count: execution.step_executions.count,
        completion_percentage: execution.completion_percentage,
        metadata: execution.metadata
      }
    end

    render_success(data: execution_data)
  end

  private

  # Scoped through current_user so a user can only reach their own
  # journeys (raises RecordNotFound otherwise).
  def set_journey
    @journey = current_user.journeys.find(params[:id])
  end

  # Strong parameters for journey create/update.
  def journey_params
    params.require(:journey).permit(
      :name, :description, :campaign_type, :target_audience, :status,
      :campaign_id, goals: [], metadata: {}, settings: {}
    )
  end

  # Compact journey representation for list endpoints.
  def serialize_journey_summary(journey)
    {
      id: journey.id,
      name: journey.name,
      description: journey.description,
      status: journey.status,
      campaign_type: journey.campaign_type,
      campaign_id: journey.campaign_id,
      campaign_name: journey.campaign&.name,
      persona_name: journey.persona&.name,
      step_count: journey.total_steps,
      created_at: journey.created_at,
      updated_at: journey.updated_at,
      published_at: journey.published_at,
      performance_score: journey.latest_performance_score
    }
  end

  # Full journey representation, including nested campaign/persona
  # summaries when those associations are present.
  def serialize_journey_detail(journey)
    {
      id: journey.id,
      name: journey.name,
      description: journey.description,
      status: journey.status,
      campaign_type: journey.campaign_type,
      target_audience: journey.target_audience,
      goals: journey.goals,
      metadata: journey.metadata,
      settings: journey.settings,
      campaign_id: journey.campaign_id,
      campaign: journey.campaign ? serialize_campaign_summary(journey.campaign) : nil,
      persona: journey.persona ? serialize_persona_summary(journey.persona) : nil,
      step_count: journey.total_steps,
      steps_by_stage: journey.steps_by_stage,
      created_at: journey.created_at,
      updated_at: journey.updated_at,
      published_at: journey.published_at,
      archived_at: journey.archived_at,
      performance_score: journey.latest_performance_score,
      ab_test_status: journey.ab_test_status
    }
  end

  # Minimal campaign payload nested inside journey detail.
  def serialize_campaign_summary(campaign)
    {
      id: campaign.id,
      name: campaign.name,
      campaign_type: campaign.campaign_type,
      status: campaign.status
    }
  end

  # Minimal persona payload nested inside journey detail.
  def serialize_persona_summary(persona)
    {
      id: persona.id,
      name: persona.name,
      demographic_data: persona.demographic_data,
      psychographic_data: persona.psychographic_data
    }
  end
end
-
# REST API for the current user's audience personas: CRUD, cloning,
# predefined persona templates, and performance/analytics reporting.
class Api::V1::PersonasController < Api::V1::BaseController
  # :clone also operates on @persona — it was missing from this list,
  # which left @persona nil inside #clone (NoMethodError on nil.dup).
  before_action :set_persona, only: [:show, :update, :destroy, :campaigns, :performance, :clone]

  # GET /api/v1/personas
  #
  # Lists the user's personas with optional demographic filters, free
  # text search and whitelisted sorting (defaults to name).
  def index
    personas = current_user.personas.includes(:campaigns)

    # Apply filters
    # NOTE(review): `&&` is the PostgreSQL array-overlap operator; this
    # only works if age_range is an array column — the persona
    # templates below use string ranges like '25-35', so confirm the
    # schema before relying on this filter.
    personas = personas.where('age_range && ?', params[:age_range]) if params[:age_range].present?
    personas = personas.where('location ILIKE ?', "%#{params[:location]}%") if params[:location].present?
    personas = personas.where('industry ILIKE ?', "%#{params[:industry]}%") if params[:industry].present?

    # Apply search (bound parameters — user input never interpolated
    # into the SQL fragment itself)
    if params[:search].present?
      personas = personas.where(
        'name ILIKE ? OR description ILIKE ?',
        "%#{params[:search]}%", "%#{params[:search]}%"
      )
    end

    # Apply sorting — whitelist only
    case params[:sort_by]
    when 'name'
      personas = personas.order(:name)
    when 'age_range'
      personas = personas.order(:age_range)
    when 'location'
      personas = personas.order(:location)
    when 'created_at'
      personas = personas.order(:created_at)
    else
      personas = personas.order(:name)
    end

    paginate_and_render(personas, serializer: method(:serialize_persona_summary))
  end

  # GET /api/v1/personas/:id
  def show
    render_success(data: serialize_persona_detail(@persona))
  end

  # POST /api/v1/personas
  def create
    persona = current_user.personas.build(persona_params)

    if persona.save
      render_success(
        data: serialize_persona_detail(persona),
        message: 'Persona created successfully',
        status: :created
      )
    else
      render_error(
        message: 'Failed to create persona',
        errors: persona.errors.as_json
      )
    end
  end

  # PUT /api/v1/personas/:id
  def update
    if @persona.update(persona_params)
      render_success(
        data: serialize_persona_detail(@persona),
        message: 'Persona updated successfully'
      )
    else
      render_error(
        message: 'Failed to update persona',
        errors: @persona.errors.as_json
      )
    end
  end

  # DELETE /api/v1/personas/:id
  #
  # Refused while any campaign still references the persona.
  def destroy
    if @persona.campaigns.any?
      render_error(
        message: 'Cannot delete persona with associated campaigns',
        code: 'PERSONA_IN_USE'
      )
    else
      @persona.destroy!
      render_success(message: 'Persona deleted successfully')
    end
  end

  # GET /api/v1/personas/:id/campaigns
  def campaigns
    campaigns = @persona.campaigns.includes(:journeys)

    # Apply filters
    campaigns = campaigns.where(status: params[:status]) if params[:status].present?
    campaigns = campaigns.where(campaign_type: params[:campaign_type]) if params[:campaign_type].present?

    paginate_and_render(campaigns, serializer: method(:serialize_campaign_for_persona))
  end

  # GET /api/v1/personas/:id/performance
  #
  # Full performance report for the persona. `days` defaults to 30
  # and is clamped to 1..365. (Previously the floor of 30 silently
  # ignored any smaller requested window.)
  def performance
    days = params[:days].present? ? params[:days].to_i : 30
    days = days.clamp(1, 365)

    # Get campaigns and journeys associated with this persona
    campaigns = @persona.campaigns.includes(:journeys)
    journeys = campaigns.flat_map(&:journeys)

    performance_data = {
      summary: calculate_persona_summary(@persona, journeys, days),
      campaign_performance: calculate_persona_campaign_performance(campaigns, days),
      journey_performance: calculate_persona_journey_performance(journeys, days),
      engagement_patterns: calculate_persona_engagement_patterns(@persona, days),
      conversion_insights: calculate_persona_conversion_insights(@persona, days),
      demographic_insights: calculate_demographic_insights(@persona)
    }

    # Recommendations inspect performance_data[:summary], so they must
    # be generated after the hash exists. (The previous version
    # referenced performance_data inside its own hash literal, passing
    # nil and crashing in generate_persona_recommendations.)
    performance_data[:recommendations] = generate_persona_recommendations(@persona, performance_data)

    render_success(data: performance_data)
  end

  # POST /api/v1/personas/:id/clone
  def clone
    begin
      new_persona = @persona.dup
      new_persona.name = "#{@persona.name} (Copy)"
      new_persona.save!

      render_success(
        data: serialize_persona_detail(new_persona),
        message: 'Persona cloned successfully',
        status: :created
      )
    rescue => e
      render_error(message: "Failed to clone persona: #{e.message}")
    end
  end

  # GET /api/v1/personas/templates
  #
  # Static, predefined persona templates for quick starts.
  def templates
    templates = [
      {
        name: 'Young Professional',
        age_range: '25-35',
        location: 'Urban',
        demographic_data: {
          income_range: '$50,000-$75,000',
          education: 'College Graduate',
          employment: 'Full-time Professional'
        },
        psychographic_data: {
          interests: ['Career Growth', 'Technology', 'Fitness'],
          values: ['Work-life Balance', 'Innovation', 'Achievement'],
          lifestyle: 'Fast-paced, Digital-first'
        }
      },
      {
        name: 'Family-Oriented Parent',
        age_range: '30-45',
        location: 'Suburban',
        demographic_data: {
          income_range: '$60,000-$100,000',
          education: 'College Graduate',
          family_status: 'Married with Children'
        },
        psychographic_data: {
          interests: ['Family Activities', 'Home Improvement', 'Education'],
          values: ['Family', 'Security', 'Quality'],
          lifestyle: 'Family-focused, Value-conscious'
        }
      },
      {
        name: 'Small Business Owner',
        age_range: '35-55',
        location: 'Various',
        demographic_data: {
          income_range: '$75,000-$150,000',
          education: 'College/Trade School',
          employment: 'Business Owner'
        },
        psychographic_data: {
          interests: ['Business Growth', 'Networking', 'Industry Trends'],
          values: ['Independence', 'Success', 'Innovation'],
          lifestyle: 'Busy, Results-oriented'
        }
      }
    ]

    render_success(data: templates)
  end

  # POST /api/v1/personas/from_template
  #
  # NOTE(review): permit! accepts arbitrary nested params; acceptable
  # here only because each attribute is read out explicitly below.
  def create_from_template
    template_data = params.require(:template).permit!

    persona = current_user.personas.build(
      name: template_data[:name],
      description: "Created from #{template_data[:name]} template",
      age_range: template_data[:age_range],
      location: template_data[:location],
      demographic_data: template_data[:demographic_data] || {},
      psychographic_data: template_data[:psychographic_data] || {}
    )

    if persona.save
      render_success(
        data: serialize_persona_detail(persona),
        message: 'Persona created from template successfully',
        status: :created
      )
    else
      render_error(
        message: 'Failed to create persona from template',
        errors: persona.errors.as_json
      )
    end
  end

  # GET /api/v1/personas/analytics_overview
  #
  # Cross-persona overview. `days` defaults to 30, clamped to 1..365.
  def analytics_overview
    days = params[:days].present? ? params[:days].to_i : 30
    days = days.clamp(1, 365)

    personas = current_user.personas.includes(:campaigns)

    overview_data = {
      total_personas: personas.count,
      active_personas: personas.joins(:campaigns).where(campaigns: { status: 'active' }).distinct.count,
      top_performing: find_top_performing_personas(5, days),
      demographic_breakdown: calculate_demographic_breakdown(personas),
      usage_statistics: calculate_persona_usage_statistics(personas, days)
    }

    render_success(data: overview_data)
  end

  private

  # Scoped through current_user so a user can only reach their own
  # personas.
  def set_persona
    @persona = current_user.personas.find(params[:id])
  end

  # Strong parameters for persona create/update.
  def persona_params
    params.require(:persona).permit(
      :name, :description, :age_range, :location, :industry,
      demographic_data: {}, psychographic_data: {}, behavioral_data: {}
    )
  end

  # Compact persona representation for list endpoints.
  def serialize_persona_summary(persona)
    {
      id: persona.id,
      name: persona.name,
      description: persona.description,
      age_range: persona.age_range,
      location: persona.location,
      industry: persona.industry,
      campaign_count: persona.campaigns.count,
      created_at: persona.created_at,
      updated_at: persona.updated_at
    }
  end

  # Full persona representation with up to 5 associated campaigns.
  def serialize_persona_detail(persona)
    {
      id: persona.id,
      name: persona.name,
      description: persona.description,
      age_range: persona.age_range,
      location: persona.location,
      industry: persona.industry,
      demographic_data: persona.demographic_data,
      psychographic_data: persona.psychographic_data,
      behavioral_data: persona.behavioral_data,
      campaign_count: persona.campaigns.count,
      campaigns: persona.campaigns.limit(5).map { |c| serialize_campaign_for_persona(c) },
      created_at: persona.created_at,
      updated_at: persona.updated_at
    }
  end

  # Campaign summary nested inside persona payloads.
  def serialize_campaign_for_persona(campaign)
    {
      id: campaign.id,
      name: campaign.name,
      campaign_type: campaign.campaign_type,
      status: campaign.status,
      journey_count: campaign.journeys.count
    }
  end

  # Headline numbers for the performance report.
  def calculate_persona_summary(persona, journeys, days)
    {
      persona_name: persona.name,
      total_campaigns: persona.campaigns.count,
      total_journeys: journeys.count,
      performance_score: calculate_persona_performance_score(journeys, days)
    }
  end

  # Per-campaign average performance across the campaign's journeys.
  def calculate_persona_campaign_performance(campaigns, days)
    campaigns.map do |campaign|
      journeys = campaign.journeys
      scores = journeys.map(&:latest_performance_score).compact
      avg_score = scores.any? ? (scores.sum.to_f / scores.count).round(1) : 0

      {
        id: campaign.id,
        name: campaign.name,
        status: campaign.status,
        journey_count: journeys.count,
        average_performance_score: avg_score
      }
    end
  end

  # Per-journey performance/conversion snapshot.
  def calculate_persona_journey_performance(journeys, days)
    journeys.map do |journey|
      {
        id: journey.id,
        name: journey.name,
        performance_score: journey.latest_performance_score,
        conversion_rate: journey.current_analytics&.conversion_rate || 0,
        status: journey.status
      }
    end
  end

  # Engagement patterns — all delegates are currently placeholders.
  def calculate_persona_engagement_patterns(persona, days)
    campaigns = persona.campaigns

    {
      preferred_journey_types: analyze_preferred_journey_types(campaigns),
      optimal_touchpoint_frequency: analyze_touchpoint_frequency(campaigns),
      engagement_peak_times: analyze_engagement_times(campaigns),
      channel_preferences: analyze_channel_preferences(campaigns)
    }
  end

  # Conversion insights — mostly placeholder delegates.
  def calculate_persona_conversion_insights(persona, days)
    campaigns = persona.campaigns
    journeys = campaigns.flat_map(&:journeys)

    {
      average_conversion_rate: calculate_average_conversion_rate(journeys),
      conversion_triggers: identify_conversion_triggers(journeys),
      optimal_journey_length: calculate_optimal_journey_length(journeys),
      successful_touchpoints: identify_successful_touchpoints(journeys)
    }
  end

  # Demographic insights — placeholder delegates.
  def calculate_demographic_insights(persona)
    {
      age_segment_performance: analyze_age_segment_performance(persona),
      location_impact: analyze_location_impact(persona),
      industry_relevance: analyze_industry_relevance(persona)
    }
  end

  # Simple rule-based recommendations derived from the already-built
  # performance_data hash (expects performance_data[:summary]).
  def generate_persona_recommendations(persona, performance_data)
    recommendations = []

    if performance_data[:summary][:performance_score] < 50
      recommendations << "Consider adjusting journey content to better match persona interests"
    end

    if persona.campaigns.count == 0
      recommendations << "Create campaigns targeting this persona to gather performance data"
    end

    recommendations
  end

  # Top personas ranked by average journey conversion rate.
  def find_top_performing_personas(limit, days)
    current_user.personas
                .joins(campaigns: { journeys: :journey_analytics })
                .group('personas.id, personas.name')
                .order('AVG(journey_analytics.conversion_rate) DESC')
                .limit(limit)
                .pluck('personas.id, personas.name, AVG(journey_analytics.conversion_rate)')
                .map { |id, name, rate| { id: id, name: name, conversion_rate: rate&.round(2) || 0 } }
  end

  # Counts of personas per demographic attribute.
  def calculate_demographic_breakdown(personas)
    {
      age_ranges: personas.group(:age_range).count,
      locations: personas.group(:location).count,
      industries: personas.group(:industry).count
    }
  end

  # Usage statistics; guards the average against division by zero
  # (previously produced NaN for users with no personas).
  def calculate_persona_usage_statistics(personas, days)
    active_campaigns = personas.joins(:campaigns).where(campaigns: { status: 'active' }).count
    persona_count = personas.count
    total_campaign_links = personas.joins(:campaigns).group('personas.id').count.values.sum.to_f

    {
      personas_with_active_campaigns: active_campaigns,
      average_campaigns_per_persona: persona_count.zero? ? 0.0 : total_campaign_links / persona_count,
      most_used_persona: personas.joins(:campaigns).group('personas.id, personas.name').count.max_by { |_, count| count }
    }
  end

  # Mean of the journeys' latest performance scores (0.0 when none).
  def calculate_persona_performance_score(journeys, days)
    return 0.0 if journeys.empty?

    scores = journeys.map(&:latest_performance_score).compact
    return 0.0 if scores.empty?

    (scores.sum.to_f / scores.count).round(1)
  end

  def analyze_preferred_journey_types(campaigns)
    # Placeholder for journey type analysis
    []
  end

  def analyze_touchpoint_frequency(campaigns)
    # Placeholder for touchpoint frequency analysis
    'weekly'
  end

  def analyze_engagement_times(campaigns)
    # Placeholder for engagement time analysis
    []
  end

  def analyze_channel_preferences(campaigns)
    # Placeholder for channel preference analysis
    []
  end

  # Mean conversion rate across journeys (missing analytics count as 0).
  def calculate_average_conversion_rate(journeys)
    return 0.0 if journeys.empty?

    rates = journeys.map { |j| j.current_analytics&.conversion_rate || 0 }
    (rates.sum.to_f / rates.count).round(2)
  end

  def identify_conversion_triggers(journeys)
    # Placeholder for conversion trigger analysis
    []
  end

  def calculate_optimal_journey_length(journeys)
    # Placeholder for optimal journey length calculation
    5
  end

  def identify_successful_touchpoints(journeys)
    # Placeholder for successful touchpoint identification
    []
  end

  def analyze_age_segment_performance(persona)
    # Placeholder for age segment analysis
    {}
  end

  def analyze_location_impact(persona)
    # Placeholder for location impact analysis
    {}
  end

  def analyze_industry_relevance(persona)
    # Placeholder for industry relevance analysis
    {}
  end
end
-
2
# Base controller for the whole application. Wires in authentication,
# Pundit authorization, auditing and activity tracking, and installs
# centralized error handling for non-development environments.
class ApplicationController < ActionController::Base
  include Authentication
  include Pundit::Authorization
  include RailsAdminAuditable
  include ActivityTracker

  # Only allow modern browsers supporting webp images, web push, badges, import maps, CSS nesting, and CSS :has.
  allow_browser versions: :modern

  # Error handling for production
  # NOTE: rescue_from handlers registered LATER take precedence in
  # Rails, so StandardError is deliberately registered first and the
  # more specific classes after it — do not reorder.
  unless Rails.env.development? || Rails.env.test?
    rescue_from StandardError, with: :handle_internal_server_error
    rescue_from ActionController::RoutingError, with: :handle_not_found
    rescue_from ActionController::UnknownController, with: :handle_not_found
    rescue_from AbstractController::ActionNotFound, with: :handle_not_found
    rescue_from ActiveRecord::RecordNotFound, with: :handle_not_found
  end

  # Pundit authorization error handling (registered unconditionally,
  # so these apply in every environment)
  rescue_from Pundit::NotAuthorizedError, with: :user_not_authorized
  rescue_from ActionController::InvalidAuthenticityToken, with: :handle_invalid_token
  rescue_from ActionController::UnpermittedParameters, with: :handle_unpermitted_parameters

  private

  # Pundit failure: flash an alert and return to the previous page.
  def user_not_authorized
    flash[:alert] = "You are not authorized to perform this action."
    redirect_back(fallback_location: root_path)
  end

  # 404 responder for HTML, JSON and any other format; logs context
  # when an exception object is available.
  def handle_not_found(exception = nil)
    log_error_with_context(exception, :not_found) if exception

    respond_to do |format|
      format.html { render template: 'errors/404', status: :not_found }
      format.json { render json: { error: 'Not found', status: 404 }, status: :not_found }
      format.all { render plain: 'Not found', status: :not_found }
    end
  end

  # CSRF token failure: HTML users are bounced back with an expiry
  # message; API callers get a 422 JSON payload.
  def handle_invalid_token(exception = nil)
    log_error_with_context(exception, :invalid_token) if exception

    respond_to do |format|
      format.html {
        flash[:alert] = "Your session has expired. Please try again."
        redirect_to request.referrer || root_path
      }
      format.json { render json: { error: 'Invalid authenticity token', status: 422 }, status: :unprocessable_entity }
    end
  end

  # Strong-parameters violation (only raised when
  # action_on_unpermitted_parameters is :raise): render a 422.
  def handle_unpermitted_parameters(exception = nil)
    log_error_with_context(exception, :unpermitted_parameters) if exception

    respond_to do |format|
      format.html { render template: 'errors/422', status: :unprocessable_entity }
      format.json { render json: { error: 'Unpermitted parameters', status: 422 }, status: :unprocessable_entity }
    end
  end

  # Catch-all 500 responder: logs, notifies the external error service
  # in production, and renders a generic error for every format.
  def handle_internal_server_error(exception = nil)
    log_error_with_context(exception, :internal_server_error) if exception

    # Notify error tracking service (Sentry, Rollbar, etc.)
    notify_error_service(exception) if exception && Rails.env.production?

    respond_to do |format|
      format.html { render template: 'errors/500', status: :internal_server_error }
      format.json { render json: { error: 'Internal server error', status: 500 }, status: :internal_server_error }
      format.all { render plain: 'Internal server error', status: :internal_server_error }
    end
  end

  # Builds a structured context hash for the request and routes it to
  # ActivityLogger with a severity appropriate to the error type.
  # Params are filtered (Rails parameter filtering) before logging.
  def log_error_with_context(exception, error_type)
    error_context = {
      exception_class: exception.class.name,
      exception_message: exception.message,
      backtrace: exception.backtrace&.first(10),
      request_path: request.path,
      request_method: request.method,
      user_agent: request.user_agent,
      ip_address: request.remote_ip,
      user_id: current_user&.id,
      session_id: session.id,
      params: request.filtered_parameters.except('authenticity_token', 'commit'),
      referrer: request.referrer
    }

    case error_type
    when :not_found
      ActivityLogger.log(:info, "#{exception.class}: #{exception.message}", error_context)
    when :invalid_token, :unpermitted_parameters
      ActivityLogger.security('authentication_failure', exception.message, error_context)
    when :internal_server_error
      ActivityLogger.security('system_error', "#{exception.class}: #{exception.message}", error_context)
    end
  end

  # Hook for an external error tracker; currently just logs.
  def notify_error_service(exception)
    # Integration point for error tracking services
    # Example: Sentry.capture_exception(exception)
    Rails.logger.error "CRITICAL ERROR: #{exception.class} - #{exception.message}\n#{exception.backtrace&.join("\n")}"
  end
end
-
class BrandAssetsController < ApplicationController
-
before_action :set_brand
-
before_action :set_brand_asset, only: [:show, :edit, :update, :destroy, :reprocess, :download]
-
-
def index
-
@brand_assets = @brand.brand_assets.includes(:file_attachment)
-
end
-
-
def show
-
end
-
-
def new
-
@brand_asset = @brand.brand_assets.build
-
end
-
-
def create
-
if params[:brand_asset][:files].present?
-
# Handle multiple file uploads
-
@brand_assets = []
-
@errors = []
-
-
params[:brand_asset][:files].each do |file|
-
brand_asset = @brand.brand_assets.build(
-
file: file,
-
asset_type: determine_asset_type(file),
-
original_filename: file.original_filename
-
)
-
-
if brand_asset.save
-
@brand_assets << brand_asset
-
else
-
@errors << { filename: file.original_filename, errors: brand_asset.errors.full_messages }
-
end
-
end
-
-
if request.xhr?
-
render json: {
-
success: @errors.empty?,
-
assets: @brand_assets.map { |asset| asset_json(asset) },
-
errors: @errors
-
}
-
else
-
if @errors.empty?
-
redirect_to brand_brand_assets_path(@brand),
-
notice: "#{@brand_assets.count} asset(s) uploaded successfully."
-
else
-
flash[:alert] = "Some files failed to upload: #{@errors.map { |e| e[:filename] }.join(', ')}"
-
redirect_to new_brand_brand_asset_path(@brand)
-
end
-
end
-
else
-
# Handle single file upload
-
@brand_asset = @brand.brand_assets.build(brand_asset_params)
-
-
if @brand_asset.save
-
if request.xhr?
-
render json: { success: true, asset: asset_json(@brand_asset) }
-
else
-
redirect_to brand_brand_asset_path(@brand, @brand_asset),
-
notice: 'Brand asset was successfully uploaded and is being processed.'
-
end
-
else
-
if request.xhr?
-
render json: { success: false, errors: @brand_asset.errors.full_messages }, status: :unprocessable_entity
-
else
-
render :new, status: :unprocessable_entity
-
end
-
end
-
end
-
end
-
-
def edit
-
end
-
-
def update
-
if @brand_asset.update(brand_asset_params)
-
redirect_to brand_brand_asset_path(@brand, @brand_asset),
-
notice: 'Brand asset was successfully updated.'
-
else
-
render :edit, status: :unprocessable_entity
-
end
-
end
-
-
def destroy
-
@brand_asset.destroy!
-
redirect_to brand_brand_assets_url(@brand),
-
notice: 'Brand asset was successfully destroyed.'
-
end
-
-
def reprocess
-
@brand_asset.update!(processing_status: 'pending')
-
BrandAssetProcessingJob.perform_later(@brand_asset)
-
-
redirect_to brand_brand_asset_path(@brand, @brand_asset),
-
notice: 'Brand asset is being reprocessed.'
-
end
-
-
def download
-
if @brand_asset.file.attached?
-
redirect_to rails_blob_url(@brand_asset.file, disposition: "attachment")
-
else
-
redirect_to brand_brand_assets_url(@brand),
-
alert: 'No file attached to this asset.'
-
end
-
end
-
-
# AJAX endpoint for upload status
-
def status
-
@brand_asset = @brand.brand_assets.find(params[:id])
-
render json: asset_json(@brand_asset)
-
end
-
-
# AJAX endpoint for batch status check
-
def batch_status
-
asset_ids = params[:asset_ids].split(',')
-
@brand_assets = @brand.brand_assets.where(id: asset_ids)
-
render json: {
-
assets: @brand_assets.map { |asset| asset_json(asset) }
-
}
-
end
-
-
private
-
-
def set_brand
-
@brand = current_user.brands.find(params[:brand_id])
-
end
-
-
def set_brand_asset
-
@brand_asset = @brand.brand_assets.find(params[:id])
-
end
-
-
def brand_asset_params
-
params.require(:brand_asset).permit(:file, :asset_type, :original_filename)
-
end
-
-
def determine_asset_type(file)
-
content_type = file.content_type
-
filename = file.original_filename.downcase
-
-
case content_type
-
when *BrandAsset::ALLOWED_CONTENT_TYPES[:image]
-
return 'logo' if filename.include?('logo')
-
'image'
-
when *BrandAsset::ALLOWED_CONTENT_TYPES[:document]
-
return 'brand_guidelines' if filename.include?('guideline') || filename.include?('brand')
-
return 'style_guide' if filename.include?('style')
-
'document'
-
when *BrandAsset::ALLOWED_CONTENT_TYPES[:video]
-
'video'
-
else
-
'document' # Default fallback
-
end
-
end
-
-
# Serializes one brand asset for the JSON status endpoints.
def asset_json(asset)
  attached = asset.file.attached?

  {
    id: asset.id,
    filename: asset.original_filename,
    asset_type: asset.asset_type,
    processing_status: asset.processing_status,
    file_size: asset.file_size_mb.round(2),
    content_type: attached ? asset.file.content_type : nil,
    url: attached ? rails_blob_path(asset.file) : nil,
    # NOTE(review): `format: :download` looks like it was meant to hit the
    # member `download` route rather than request a :download format —
    # confirm against config/routes.rb.
    download_url: brand_brand_asset_path(@brand, asset, format: :download),
    created_at: asset.created_at.iso8601,
    processed_at: asset.processed_at&.iso8601
  }
end
-
end
-
# CRUD for a brand's guidelines, scoped to brands owned by the current user.
class BrandGuidelinesController < ApplicationController
  before_action :set_brand
  before_action :set_brand_guideline, only: [:show, :edit, :update, :destroy]

  def index
    @guidelines_by_category =
      @brand.brand_guidelines.active.ordered.group_by(&:category)
  end

  def show
  end

  def new
    @brand_guideline = @brand.brand_guidelines.build
  end

  def create
    @brand_guideline = @brand.brand_guidelines.build(brand_guideline_params)

    unless @brand_guideline.save
      render :new, status: :unprocessable_entity
      return
    end

    redirect_to brand_brand_guidelines_path(@brand),
                notice: 'Brand guideline was successfully created.'
  end

  def edit
  end

  def update
    unless @brand_guideline.update(brand_guideline_params)
      render :edit, status: :unprocessable_entity
      return
    end

    redirect_to brand_brand_guidelines_path(@brand),
                notice: 'Brand guideline was successfully updated.'
  end

  def destroy
    @brand_guideline.destroy!
    redirect_to brand_brand_guidelines_path(@brand),
                notice: 'Brand guideline was successfully destroyed.'
  end

  private

  # Scopes lookups to the signed-in user's brands (404s otherwise).
  def set_brand
    @brand = current_user.brands.find(params[:brand_id])
  end

  def set_brand_guideline
    @brand_guideline = @brand.brand_guidelines.find(params[:id])
  end

  # Strong parameters; examples/metadata accept arbitrary nested hashes.
  def brand_guideline_params
    params.require(:brand_guideline).permit(
      :rule_type, :rule_content, :category, :priority, :active,
      examples: {}, metadata: {}
    )
  end
end
-
# CRUD plus compliance-check endpoints for the current user's brands.
class BrandsController < ApplicationController
  before_action :set_brand, only: [:show, :edit, :update, :destroy, :compliance_check, :check_content_compliance]

  def index
    @brands = current_user.brands.active.includes(:brand_assets, :latest_analysis)
  end

  def show
    @latest_analysis = @brand.latest_analysis
    @brand_assets = @brand.brand_assets.includes(:file_attachment)
    @guidelines = @brand.brand_guidelines.active.ordered
    @messaging_framework = @brand.messaging_framework
  end

  def new
    @brand = current_user.brands.build
  end

  def create
    @brand = current_user.brands.build(brand_params)

    unless @brand.save
      render :new, status: :unprocessable_entity
      return
    end

    redirect_to @brand, notice: 'Brand was successfully created.'
  end

  def edit
  end

  def update
    unless @brand.update(brand_params)
      render :edit, status: :unprocessable_entity
      return
    end

    redirect_to @brand, notice: 'Brand was successfully updated.'
  end

  def destroy
    @brand.destroy!
    redirect_to brands_url, notice: 'Brand was successfully destroyed.'
  end

  # Renders the compliance-check form.
  def compliance_check
    @compliance_form = ComplianceCheckForm.new
  end

  # Runs the compliance service on submitted content; answers JSON or HTML.
  def check_content_compliance
    content = params[:content]
    content_type = params[:content_type] || 'general'

    result = Branding::ComplianceService
               .new(@brand, content, content_type)
               .validate_and_suggest

    respond_to do |format|
      format.json { render json: result }
      format.html do
        @compliance_result = result
        render :compliance_result
      end
    end
  end

  private

  # Scopes the brand lookup to the signed-in user.
  def set_brand
    @brand = current_user.brands.find(params[:id])
  end

  def brand_params
    params.require(:brand).permit(
      :name, :description, :industry, :website, :active,
      color_scheme: {}, typography: {}, settings: {}
    )
  end
end
-
# frozen_string_literal: true

# Controller concern that records a UserActivity row after each tracked
# request. Audit failures are logged and swallowed so tracking can never
# break the request itself.
module ActivityTrackable
  extend ActiveSupport::Concern

  included do
    # Track activity for all actions by default
    after_action :track_user_activity
  end

  private

  # after_action hook: persist one activity record for the current request.
  def track_user_activity
    return unless should_track_activity?

    UserActivity.log_activity(
      current_user,
      determine_activity_action,
      controller_name: controller_name,
      action_name: action_name,
      resource_type: determine_resource_type,
      resource_id: determine_resource_id,
      ip_address: request.remote_ip,
      user_agent: request.user_agent,
      request_params: filtered_params,
      metadata: activity_metadata
    )
  rescue StandardError => e
    # Never let audit logging take down the request.
    Rails.logger.error "Failed to track user activity: #{e.message}"
    Rails.logger.error e.backtrace.join("\n")
  end

  # Only authenticated traffic is tracked, and noisy read-only GETs
  # (show/index) plus the admin engine are skipped.
  def should_track_activity?
    return false unless current_user.present?

    skip_controllers = %w[rails_admin]
    skip_actions = %w[show index]

    return false if skip_controllers.include?(controller_name)
    return false if skip_actions.include?(action_name) && request.get?

    true
  end

  # Maps the current controller/action pair to a canonical activity type.
  #
  # BUG FIX: the controller-specific mappings (sessions#create => :login,
  # sessions#destroy => :logout, passwords#create => :password_reset,
  # profiles#update => :profile_update) previously lived in the `else`
  # branch of a `case action_name` whose generic `when 'create'` /
  # `'update'` / `'destroy'` arms always matched first, making all four
  # mappings unreachable. They are now checked before the generic mapping.
  def determine_activity_action
    specific = controller_specific_activity_action
    return specific if specific

    case action_name
    when 'create'
      UserActivity::ACTIVITY_TYPES[:create]
    when 'update', 'edit'
      UserActivity::ACTIVITY_TYPES[:update]
    when 'destroy'
      UserActivity::ACTIVITY_TYPES[:delete]
    when 'download'
      UserActivity::ACTIVITY_TYPES[:download]
    when 'upload'
      UserActivity::ACTIVITY_TYPES[:upload]
    else
      action_name
    end
  end

  # Special-case activity types for auth/profile controllers; nil when the
  # generic CRUD mapping should apply.
  def controller_specific_activity_action
    case [controller_name, action_name]
    when %w[sessions create]   then UserActivity::ACTIVITY_TYPES[:login]
    when %w[sessions destroy]  then UserActivity::ACTIVITY_TYPES[:logout]
    when %w[passwords create]  then UserActivity::ACTIVITY_TYPES[:password_reset]
    when %w[profiles update]   then UserActivity::ACTIVITY_TYPES[:profile_update]
    end
  end

  # Infers the model name from the controller path, returning it only when
  # a constant of that name actually exists.
  def determine_resource_type
    return nil if params[:controller].blank?

    resource_name = params[:controller].split('/').last.singularize.camelize

    begin
      resource_name.constantize
      resource_name
    rescue NameError
      nil
    end
  end

  # Picks the first present ID-ish parameter for the tracked resource.
  def determine_resource_id
    id_params = [:id, :resource_id, "#{controller_name.singularize}_id".to_sym]

    id_params.each do |param|
      return params[param] if params[param].present?
    end

    nil
  end

  # JSON-encoded request params, stripped of secrets and limited to a
  # small allow-list of keys; '{}' on any serialization failure.
  def filtered_params
    filtered = params.except(
      :password,
      :password_confirmation,
      :token,
      :secret,
      :api_key,
      :access_token,
      :refresh_token,
      :authenticity_token
    )

    filtered.to_unsafe_h.slice(*allowed_param_keys).to_json
  rescue StandardError
    '{}'
  end

  # Parameters considered safe and useful to log.
  def allowed_param_keys
    %w[action controller id page per_page search filter sort order]
  end

  # Common request context stored alongside every activity record.
  def activity_metadata
    {
      session_id: session.id,
      referer: request.referer,
      method: request.method,
      path: request.path,
      timestamp: Time.current.iso8601
    }
  end

  # Helper for controllers to record a named activity explicitly.
  def track_activity(action, options = {})
    return unless current_user.present?

    UserActivity.log_activity(
      current_user,
      action,
      options.merge(
        controller_name: controller_name,
        action_name: action_name,
        ip_address: request.remote_ip,
        user_agent: request.user_agent
      )
    )
  end

  # Track failed login attempts (call this manually in sessions controller).
  # Silently no-ops when the attempted email matches no account.
  def track_failed_login(email)
    user = User.find_by(email: email)
    return unless user

    UserActivity.log_activity(
      user,
      UserActivity::ACTIVITY_TYPES[:failed_login],
      controller_name: controller_name,
      action_name: action_name,
      ip_address: request.remote_ip,
      user_agent: request.user_agent,
      metadata: { attempted_email: email }
    )
  end
end
-
2
# around_action-based request tracking: logs start/end of every action,
# persists Activity rows, flags slow requests and suspicious patterns.
module ActivityTracker
  extend ActiveSupport::Concern

  included do
    around_action :track_activity, if: :track_activity?
    before_action :set_current_request_context
  end

  private

  # Wraps the action: records timing, writes the activity row, logs slow
  # requests, and re-raises any error after recording it.
  def track_activity
    return yield unless current_user && track_activity?

    # Skip tracking for RailsAdmin controllers to avoid compatibility issues
    return yield if controller_name.include?('rails_admin') || self.class.name.include?('RailsAdmin')

    started_at = Time.current

    # Correlate log lines for this request.
    Thread.current[:request_id] = request.request_id

    ActivityLogger.log(:debug, "Action started", {
      controller: controller_name,
      action: action_name,
      user_id: current_user.id,
      method: request.method
    })

    yield

    elapsed = Time.current - started_at
    log_user_activity(response_time: elapsed) if started_at

    # Surface slow requests in the performance log.
    if elapsed > 1.0
      ActivityLogger.performance('slow_request', "Slow request detected", {
        controller: controller_name,
        action: action_name,
        duration_ms: (elapsed * 1000).round,
        path: request.path
      })
    end
  rescue => e
    # Track failed activities, but don't interfere with API error handling
    elapsed = started_at ? Time.current - started_at : nil

    unless self.class.ancestors.any? { |a| a.name == 'Api::V1::BaseController' }
      ActivityLogger.log(:error, "Action failed", {
        controller: controller_name,
        action: action_name,
        error: e.message,
        backtrace: e.backtrace.first(5),
        duration_ms: elapsed ? (elapsed * 1000).round : nil
      })

      if current_user
        log_user_activity(
          response_time: elapsed,
          error: e.message,
          response_status: 500
        )
      end
    end

    raise e
  ensure
    Thread.current[:request_id] = nil
  end

  # Manually record a named activity outside the around_action flow.
  def log_custom_activity(action_name, metadata = {})
    return unless current_user

    Activity.create!(
      user: current_user,
      action: action_name,
      controller: controller_name,
      path: request.path,
      method: request.method,
      ip_address: request.remote_ip,
      user_agent: request.user_agent,
      metadata: metadata
    )
  rescue => e
    Rails.logger.error "Failed to log custom activity: #{e.message}"
  end

  # Persists the activity row for this request and escalates anything the
  # suspicious-activity detector flags. Failures are logged, never raised.
  def log_user_activity(additional_metadata = {})
    return unless current_user && should_log_activity?

    metadata = {
      params: filtered_params,
      response_time: additional_metadata[:response_time],
      error: additional_metadata[:error],
      request_format: request.format.to_s,
      ajax_request: request.xhr?,
      ssl: request.ssl?
    }.compact

    activity = Activity.log_activity(
      user: current_user,
      action: action_name,
      controller: controller_name,
      request: request,
      response: response,
      metadata: metadata
    )

    if activity.persisted? && check_suspicious_activity(activity)
      ActivityLogger.security('suspicious_activity', "Suspicious activity detected", {
        activity_id: activity.id,
        reasons: activity.metadata['suspicious_reasons']
      })
    end
  rescue => e
    Rails.logger.error "Failed to log activity: #{e.message}"
    ActivityLogger.log(:error, "Activity logging failed", {
      error: e.message,
      controller: controller_name,
      action: action_name
    })
  end

  def check_suspicious_activity(activity)
    SuspiciousActivityDetector.new(activity).check
  end

  # Track all actions by default; override in controllers as needed.
  def track_activity?
    true
  end

  # Skips noisy endpoints and framework plumbing.
  def should_log_activity?
    skip_actions = %w[heartbeat health_check]
    skip_controllers = %w[rails_admin active_storage]

    !skip_actions.include?(action_name) &&
      !skip_controllers.include?(controller_name) &&
      !request.path.start_with?('/rails/active_storage')
  end

  # Request params minus routing noise and secrets; {} on failure.
  def filtered_params
    request.filtered_parameters.except("controller", "action", "authenticity_token")
  rescue
    {}
  end

  # Populates Current attributes so downstream code can log request context.
  def set_current_request_context
    Current.request_id = request.request_id
    Current.user_agent = request.user_agent
    Current.ip_address = request.remote_ip
    Current.session_id = session.id if session.loaded?
  end
end
-
# Audit trail for admin-area write requests (POST/PUT/PATCH/DELETE).
module AdminAuditable
  extend ActiveSupport::Concern

  included do
    if respond_to?(:after_action)
      after_action :log_admin_action, if: :should_audit?
    end
  end

  private

  # Writes one AdminAuditLog row; logging errors are swallowed.
  def log_admin_action
    return unless current_user && admin_action_performed?

    AdminAuditLog.log_action(
      user: current_user,
      action: determine_admin_action,
      auditable: determine_auditable_resource,
      changes: determine_changes,
      request: request
    )
  rescue => e
    Rails.logger.error "Failed to log admin action: #{e.message}"
  end

  # Only audit if user is admin and we're in the admin area.
  def should_audit?
    current_user&.admin? && request.path.start_with?("/admin")
  end

  # True when the HTTP verb indicates a state change was attempted.
  def admin_action_performed?
    request.post? || request.put? || request.patch? || request.delete?
  end

  # Coarse action label derived from the HTTP verb.
  def determine_admin_action
    case request.method.downcase
    when "post"
      params[:action] == "create" ? "created" : "action_performed"
    when "put", "patch"
      "updated"
    when "delete"
      "deleted"
    else
      "viewed"
    end
  end

  # Best-effort lookup of the record being acted upon.
  def determine_auditable_resource
    return @object if defined?(@object) && @object.present?

    if params[:model_name].present? && params[:id].present?
      begin
        params[:model_name].classify.constantize.find_by(id: params[:id])
      rescue
        nil
      end
    end
  end

  # Captured change set: dirty-tracking diff when available, otherwise a
  # bulk-action marker or the sanitized request params.
  def determine_changes
    return nil unless defined?(@object) && @object.present?

    if @object.respond_to?(:previous_changes) && @object.previous_changes.any?
      # Filter out sensitive fields
      @object.previous_changes.except(
        "password_digest",
        "password",
        "password_confirmation",
        "session_token",
        "reset_token"
      )
    elsif params[:bulk_ids].present?
      { bulk_action: true, affected_ids: params[:bulk_ids] }
    else
      params.permit!.to_h.except(
        :controller,
        :action,
        :authenticity_token,
        :_method,
        :utf8,
        :password,
        :password_confirmation
      ).presence
    end
  end
end
-
# Session-based authentication for JSON API endpoints; renders JSON error
# envelopes instead of redirecting to the login page.
module ApiAuthentication
  extend ActiveSupport::Concern

  included do
    before_action :authenticate_api_user
  end

  private

  # Rejects unauthenticated or locked users; returns true when allowed.
  def authenticate_api_user
    unless authenticated?
      render_api_authentication_error
      return false
    end

    if current_user.locked?
      render_api_account_locked_error
      return false
    end

    true
  end

  def render_api_authentication_error
    render json: {
      success: false,
      message: 'Authentication required',
      code: 'AUTHENTICATION_REQUIRED'
    }, status: :unauthorized
  end

  def render_api_account_locked_error
    render json: {
      success: false,
      message: 'Account is locked',
      code: 'ACCOUNT_LOCKED',
      details: current_user.lock_reason
    }, status: :forbidden
  end

  # Override parent class methods to return JSON instead of redirects
  def request_authentication
    render_api_authentication_error
  end
end
-
# Shared rescue_from handlers that translate common exceptions into a
# uniform JSON error envelope (via the including controller's render_error).
module ApiErrorHandling
  extend ActiveSupport::Concern

  included do
    # Rails processes rescue_from in reverse order, so put StandardError first
    rescue_from StandardError, with: :handle_internal_error
    rescue_from Pundit::NotAuthorizedError, with: :handle_unauthorized
    rescue_from ActionController::ParameterMissing, with: :handle_parameter_missing
    rescue_from ActiveRecord::RecordInvalid, with: :handle_validation_error
    rescue_from ActiveRecord::RecordNotFound, with: :handle_not_found
  end

  private

  def handle_not_found(_exception)
    render_error(
      message: 'Resource not found',
      status: :not_found,
      code: 'RESOURCE_NOT_FOUND'
    )
  end

  # Includes the model's validation errors in the response body.
  def handle_validation_error(exception)
    render_error(
      message: 'Validation failed',
      errors: exception.record.errors.as_json,
      status: :unprocessable_entity,
      code: 'VALIDATION_ERROR'
    )
  end

  def handle_parameter_missing(exception)
    render_error(
      message: "Required parameter missing: #{exception.param}",
      status: :bad_request,
      code: 'PARAMETER_MISSING'
    )
  end

  def handle_unauthorized(_exception)
    render_error(
      message: 'Access denied',
      status: :forbidden,
      code: 'ACCESS_DENIED'
    )
  end

  # Catch-all: logs the exception and hides details in production.
  def handle_internal_error(exception)
    Rails.logger.error "API Error: #{exception.class} - #{exception.message}"
    Rails.logger.error exception.backtrace.join("\n") if Rails.env.development?

    safe_message = Rails.env.production? ? 'Internal server error' : exception.message

    render_error(
      message: safe_message,
      status: :internal_server_error,
      code: 'INTERNAL_ERROR'
    )
  end
end
-
# Offset pagination helpers for JSON API index endpoints.
module ApiPagination
  extend ActiveSupport::Concern

  DEFAULT_PAGE_SIZE = 25
  MAX_PAGE_SIZE = 100

  private

  # Paginates +collection+ using params[:page] / params[:per_page] and
  # returns { collection: <scope>, meta: { pagination: {...} } }.
  #
  # BUG FIX: the previous per-page clamp
  #   [[params[:per_page].to_i, DEFAULT_PAGE_SIZE].max, MAX_PAGE_SIZE].min
  # raised every requested size below DEFAULT_PAGE_SIZE up to 25, so a
  # client could never receive fewer than 25 records per page. The default
  # now applies only when the parameter is absent or non-positive, while
  # MAX_PAGE_SIZE still caps the upper bound.
  def paginate_collection(collection)
    page = [params[:page].to_i, 1].max

    per_page = params[:per_page].to_i
    per_page = DEFAULT_PAGE_SIZE unless per_page.positive?
    per_page = [per_page, MAX_PAGE_SIZE].min

    offset = (page - 1) * per_page
    total_count = collection.count
    total_pages = (total_count.to_f / per_page).ceil

    {
      collection: collection.limit(per_page).offset(offset),
      meta: {
        pagination: {
          current_page: page,
          per_page: per_page,
          total_count: total_count,
          total_pages: total_pages,
          has_next_page: page < total_pages,
          has_previous_page: page > 1
        }
      }
    }
  end

  # Paginates, optionally maps each record through +serializer+ (any
  # callable), and renders the standard success envelope.
  def paginate_and_render(collection, serializer: nil, **options)
    result = paginate_collection(collection)

    data =
      if serializer
        result[:collection].map { |item| serializer.call(item) }
      else
        result[:collection]
      end

    render_success(
      data: data,
      meta: result[:meta],
      **options
    )
  end
end
-
2
# Cookie-backed database session authentication for controllers.
module Authentication
  extend ActiveSupport::Concern

  included do
    before_action :require_authentication
    helper_method :authenticated?, :current_user
  end

  class_methods do
    # Opt a controller (or selected actions) out of authentication.
    def allow_unauthenticated_access(**options)
      skip_before_action :require_authentication, **options
    end
  end

  private

  def authenticated?
    resume_session
  end

  def current_user
    Current.session&.user
  end

  def require_authentication
    resume_session || request_authentication
  end

  # Restores the session from the signed cookie. Returns true only for a
  # live, unlocked session; expired/inactive/locked sessions are terminated.
  #
  # BUG FIX: the locked-account branch previously called terminate_session
  # (which sets Current.session to nil) and THEN built the alert from
  # Current.session.user.lock_reason, raising NoMethodError on nil. The
  # user is now captured before the session is torn down.
  def resume_session
    Current.session ||= find_session_by_cookie
    return false unless Current.session

    if Current.session.expired? || Current.session.inactive?
      terminate_session
      false
    elsif Current.session.user.locked?
      locked_user = Current.session.user
      terminate_session
      redirect_to new_session_path, alert: "Your account has been locked: #{locked_user.lock_reason}"
      false
    else
      Current.session.touch_activity!
      true
    end
  end

  def find_session_by_cookie
    Session.active.find_by(id: cookies.signed[:session_id]) if cookies.signed[:session_id]
  end

  # Remembers the requested URL, then sends the visitor to the login page.
  def request_authentication
    session[:return_to_after_authenticating] = request.url
    redirect_to new_session_path
  end

  def after_authentication_url
    session.delete(:return_to_after_authenticating) || root_url
  end

  # Creates a DB-backed session and sets the signed session cookie.
  # remember_me extends the lifetime to 30 days via a permanent cookie.
  def start_new_session_for(user, remember_me: false)
    session_timeout = remember_me ? 30.days : Session::SESSION_TIMEOUT

    user.sessions.create!(
      user_agent: request.user_agent,
      ip_address: request.remote_ip,
      expires_at: session_timeout.from_now
    ).tap do |session|
      Current.session = session

      if remember_me
        cookies.signed.permanent[:session_id] = {
          value: session.id,
          httponly: true,
          same_site: :lax,
          secure: Rails.env.production?
        }
      else
        cookies.signed[:session_id] = {
          value: session.id,
          httponly: true,
          same_site: :lax,
          secure: Rails.env.production?,
          expires: session_timeout.from_now
        }
      end
    end
  end

  # Destroys the DB session and clears both the cookie and Current state.
  def terminate_session
    Current.session.destroy if Current.session
    cookies.delete(:session_id)
    Current.session = nil
  end
end
-
2
# Writes AdminAuditLog entries for destructive RailsAdmin actions.
module RailsAdminAuditable
  extend ActiveSupport::Concern

  included do
    after_action :log_admin_action, if: :admin_action_performed?
  end

  private

  # Only log write actions in admin panel.
  def admin_action_performed?
    controller_name == 'rails_admin/main' &&
      %w[create update destroy bulk_delete].include?(action_name)
  end

  # Persists one audit row; failures are logged and swallowed.
  def log_admin_action
    return unless current_user

    AdminAuditLog.log_action(
      user: current_user,
      action: determine_admin_action,
      auditable: determine_auditable,
      changes: determine_changes,
      request: request
    )
  rescue StandardError => e
    Rails.logger.error "Failed to log admin action: #{e.message}"
  end

  # Builds a label like "updated_user" from the RailsAdmin model config.
  def determine_admin_action
    case action_name
    when 'create'
      "created_#{audited_model_key}"
    when 'update'
      "updated_#{audited_model_key}"
    when 'destroy'
      "deleted_#{audited_model_key}"
    when 'bulk_delete'
      "bulk_deleted_#{audited_model_key.pluralize}"
    else
      action_name
    end
  end

  # Underscored name of the model RailsAdmin is operating on.
  def audited_model_key
    @model_config.abstract_model.model.name.underscore
  end

  def determine_auditable
    case action_name
    when 'create', 'update'
      @object
    when 'destroy'
      # Object might be destroyed, so we log the class and ID
      { type: @model_config.abstract_model.model.name, id: params[:id] }
    when 'bulk_delete'
      { type: @model_config.abstract_model.model.name, ids: params[:bulk_ids] }
    end
  end

  def determine_changes
    case action_name
    when 'create'
      @object.attributes
    when 'update'
      @object.previous_changes.except('updated_at')
    when 'destroy'
      { deleted_record: @object.attributes }
    when 'bulk_delete'
      { deleted_count: params[:bulk_ids]&.size || 0 }
    end
  end
end
-
# Renders the public 404/422/500 pages and accepts user-submitted error
# reports; all error traffic is logged for analytics and security review.
class ErrorsController < ApplicationController
  allow_unauthenticated_access
  skip_before_action :verify_browser_compatibility

  def not_found
    prepare_error(:not_found, 404, "Page Not Found",
                  "The page you're looking for doesn't exist or has been moved.")
    log_error_details
    render template: 'errors/404', status: 404
  end

  def unprocessable_entity
    prepare_error(:unprocessable_entity, 422, "Unprocessable Request",
                  "We couldn't process your request due to invalid data or parameters.")
    log_error_details
    render template: 'errors/422', status: 422
  end

  def internal_server_error
    prepare_error(:internal_server_error, 500, "Internal Server Error",
                  "Something went wrong on our end. We've been notified and are working to fix it.")
    log_error_details
    render template: 'errors/500', status: 500
  end

  # Accepts a user-filed error report, logs it, and mails the admins.
  def report_error
    return unless authenticated?

    report_params = params.require(:error_report).permit(:description, :error_type, :current_url, :expected_behavior)

    error_report_context = {
      user_id: current_user.id,
      user_email: current_user.email_address,
      description: report_params[:description],
      error_type: report_params[:error_type],
      current_url: report_params[:current_url],
      expected_behavior: report_params[:expected_behavior],
      user_agent: request.user_agent,
      ip_address: request.remote_ip,
      timestamp: Time.current
    }

    ActivityLogger.log(:info, "User error report submitted", error_report_context)

    if defined?(AdminMailer)
      AdminMailer.user_error_report(error_report_context).deliver_later
    end

    respond_to do |format|
      format.json { render json: { status: 'success', message: 'Thank you for your report. We will investigate this issue.' } }
      format.html {
        flash[:notice] = 'Thank you for your report. We will investigate this issue.'
        redirect_back(fallback_location: root_path)
      }
    end
  rescue => e
    ActivityLogger.log(:error, "Error report submission failed: #{e.message}", { user_id: current_user&.id })

    respond_to do |format|
      format.json { render json: { status: 'error', message: 'Unable to submit report at this time.' }, status: :unprocessable_entity }
      format.html {
        flash[:alert] = 'Unable to submit report at this time. Please try again later.'
        redirect_back(fallback_location: root_path)
      }
    end
  end

  private

  # Assigns the ivars the error templates render.
  def prepare_error(type, code, message, description)
    @error_type = type
    @error_code = code
    @error_message = message
    @error_description = description
  end

  # Logs the error with request context at a severity matched to the code.
  def log_error_details
    error_context = {
      error_type: @error_type,
      error_code: @error_code,
      request_path: request.path,
      request_method: request.method,
      referrer: request.referrer,
      user_agent: request.user_agent,
      ip_address: request.remote_ip,
      user_id: current_user&.id,
      session_id: session.id,
      params: filtered_params
    }

    case @error_code
    when 404
      # 404s are info-level for analytics, but repeated ones are tracked.
      ActivityLogger.log(:info, "Page not found: #{request.path}", error_context)
      track_suspicious_404_pattern(error_context)
      ActivityLogger.track_error_pattern('not_found', error_context)
    when 422
      ActivityLogger.log(:warn, "Unprocessable entity: #{request.path}", error_context)
      ActivityLogger.track_error_pattern('unprocessable_entity', error_context)
    when 500
      # Server errors escalate to security logging plus admin notification.
      ActivityLogger.security('system_error', "Internal server error occurred", error_context)
      notify_admin_of_error(error_context)
      ActivityLogger.track_error_pattern('internal_server_error', error_context)
    end
  end

  # Counts 404s per IP/user in the cache and raises a security event past 10/hour.
  def track_suspicious_404_pattern(context)
    return unless context[:ip_address] || context[:user_id]

    cache_key = "404_tracking_#{context[:ip_address]}_#{context[:user_id]}"
    count = (Rails.cache.read(cache_key) || 0) + 1

    Rails.cache.write(cache_key, count, expires_in: 1.hour)

    if count > 10
      ActivityLogger.security('suspicious_activity',
        "Excessive 404 requests detected",
        context.merge(request_count: count)
      )
    end
  end

  # Queues an admin notification for server errors (production only).
  def notify_admin_of_error(context)
    if defined?(AdminMailer) && Rails.env.production?
      AdminMailer.error_notification(context).deliver_later
    end
  end

  # Remove sensitive parameters from logging.
  def filtered_params
    request.filtered_parameters.except('authenticity_token', 'commit')
  end
end
-
# Public landing and demo pages; no authentication required.
class HomeController < ApplicationController
  allow_unauthenticated_access

  def index
  end

  # Demo action for testing loading states
  def loading_demo
  end

  # Demo action for showcasing responsive typography
  def typography_demo
  end
end
-
class JourneyStepsController < ApplicationController
-
include Authentication
-
include ActivityTracker
-
-
before_action :set_journey
-
before_action :set_journey_step, only: [:show, :edit, :update, :destroy, :move, :duplicate]
-
before_action :ensure_user_can_access_journey
-
before_action :ensure_user_can_access_step, only: [:show, :edit, :update, :destroy, :move, :duplicate]
-
-
# GET /journeys/:journey_id/steps/:id
-
def show
-
@transitions_from = @journey_step.transitions_from.includes(:to_step)
-
@transitions_to = @journey_step.transitions_to.includes(:from_step)
-
-
# Track activity
-
track_activity('viewed_journey_step', {
-
journey_id: @journey.id,
-
step_id: @journey_step.id,
-
step_name: @journey_step.name
-
})
-
-
respond_to do |format|
-
format.html
-
format.json { render json: serialize_step_for_json(@journey_step) }
-
end
-
end
-
-
# GET /journeys/:journey_id/steps/new
-
def new
-
@journey_step = @journey.journey_steps.build
-
-
# Set defaults
-
@journey_step.stage = params[:stage] if params[:stage].present?
-
@journey_step.content_type = params[:content_type] if params[:content_type].present?
-
@journey_step.channel = params[:channel] if params[:channel].present?
-
-
authorize @journey_step
-
-
respond_to do |format|
-
format.html
-
format.json { render json: serialize_step_for_json(@journey_step) }
-
end
-
end
-
-
# POST /journeys/:journey_id/steps
-
def create
-
@journey_step = @journey.journey_steps.build(journey_step_params)
-
authorize @journey_step
-
-
respond_to do |format|
-
if @journey_step.save
-
# Track activity
-
track_activity('created_journey_step', {
-
journey_id: @journey.id,
-
step_id: @journey_step.id,
-
step_name: @journey_step.name,
-
stage: @journey_step.stage,
-
content_type: @journey_step.content_type
-
})
-
-
format.html { redirect_to [@journey, @journey_step], notice: 'Journey step was successfully created.' }
-
format.json { render json: serialize_step_for_json(@journey_step), status: :created }
-
else
-
format.html { render :new, status: :unprocessable_entity }
-
format.json { render json: { errors: @journey_step.errors.as_json }, status: :unprocessable_entity }
-
end
-
end
-
end
-
-
# GET /journeys/:journey_id/steps/:id/edit
-
def edit
-
respond_to do |format|
-
format.html
-
format.json { render json: serialize_step_for_json(@journey_step) }
-
end
-
end
-
-
# PATCH/PUT /journeys/:journey_id/steps/:id
-
def update
-
respond_to do |format|
-
if @journey_step.update(journey_step_params)
-
# Track activity
-
track_activity('updated_journey_step', {
-
journey_id: @journey.id,
-
step_id: @journey_step.id,
-
step_name: @journey_step.name,
-
changes: @journey_step.saved_changes.keys
-
})
-
-
format.html { redirect_to [@journey, @journey_step], notice: 'Journey step was successfully updated.' }
-
format.json { render json: serialize_step_for_json(@journey_step) }
-
else
-
format.html { render :edit, status: :unprocessable_entity }
-
format.json { render json: { errors: @journey_step.errors.as_json }, status: :unprocessable_entity }
-
end
-
end
-
end
-
-
# DELETE /journeys/:journey_id/steps/:id
-
def destroy
-
step_name = @journey_step.name
-
@journey_step.destroy!
-
-
# Track activity
-
track_activity('deleted_journey_step', {
-
journey_id: @journey.id,
-
step_name: step_name,
-
step_id: params[:id]
-
})
-
-
respond_to do |format|
-
format.html { redirect_to @journey, notice: 'Journey step was successfully deleted.' }
-
format.json { render json: { message: 'Journey step was successfully deleted.' } }
-
end
-
end
-
-
# PATCH /journeys/:journey_id/steps/:id/move
-
def move
-
new_position = params[:position].to_i
-
-
respond_to do |format|
-
begin
-
@journey_step.move_to_position(new_position)
-
-
# Track activity
-
track_activity('moved_journey_step', {
-
journey_id: @journey.id,
-
step_id: @journey_step.id,
-
step_name: @journey_step.name,
-
new_position: new_position
-
})
-
-
format.html { redirect_to @journey, notice: 'Journey step position updated successfully.' }
-
format.json { render json: serialize_step_for_json(@journey_step.reload) }
-
rescue => e
-
format.html { redirect_to @journey, alert: "Failed to move step: #{e.message}" }
-
format.json { render json: { error: "Failed to move step: #{e.message}" }, status: :unprocessable_entity }
-
end
-
end
-
end
-
-
# POST /journeys/:journey_id/steps/:id/duplicate
-
def duplicate
-
respond_to do |format|
-
begin
-
# Create a duplicate of the step
-
@new_step = @journey_step.dup
-
@new_step.name = "#{@journey_step.name} (Copy)"
-
@new_step.position = nil # Will be set automatically
-
-
if @new_step.save
-
# Track activity
-
track_activity('duplicated_journey_step', {
-
journey_id: @journey.id,
-
original_step_id: @journey_step.id,
-
new_step_id: @new_step.id,
-
step_name: @new_step.name
-
})
-
-
format.html { redirect_to [@journey, @new_step], notice: 'Journey step was successfully duplicated.' }
-
format.json { render json: serialize_step_for_json(@new_step), status: :created }
-
else
-
format.html { redirect_to [@journey, @journey_step], alert: 'Failed to duplicate step.' }
-
format.json { render json: { errors: @new_step.errors.as_json }, status: :unprocessable_entity }
-
end
-
rescue => e
-
format.html { redirect_to [@journey, @journey_step], alert: "Failed to duplicate step: #{e.message}" }
-
format.json { render json: { error: "Failed to duplicate step: #{e.message}" }, status: :unprocessable_entity }
-
end
-
end
-
end
-
-
private

# Loads the parent journey from the route. Raises RecordNotFound (404)
# when missing.
def set_journey
  @journey = Journey.find(params[:journey_id])
end

# Loads the step scoped to @journey so steps of other journeys 404.
def set_journey_step
  @journey_step = @journey.journey_steps.find(params[:id])
end

# Pundit authorization for the journey.
def ensure_user_can_access_journey
  authorize @journey
end

# Pundit authorization for the individual step.
def ensure_user_can_access_step
  authorize @journey_step
end

# Strong parameters for create/update; config/conditions/metadata accept
# arbitrary nested hashes.
def journey_step_params
  params.require(:journey_step).permit(
    :name, :description, :stage, :content_type, :channel, :duration_days,
    :is_entry_point, :is_exit_point, config: {}, conditions: {}, metadata: {}
  )
end
-
-
# Full JSON representation of a step, including both transition
# directions and optional brand-compliance fields (defaulted when the
# model does not implement them).
def serialize_step_for_json(step)
  {
    id: step.id,
    name: step.name,
    description: step.description,
    stage: step.stage,
    position: step.position,
    content_type: step.content_type,
    channel: step.channel,
    duration_days: step.duration_days,
    config: step.config,
    conditions: step.conditions,
    metadata: step.metadata,
    is_entry_point: step.is_entry_point,
    is_exit_point: step.is_exit_point,
    journey_id: step.journey_id,
    created_at: step.created_at,
    updated_at: step.updated_at,
    transitions_from: step.transitions_from.map { |t| serialize_transition(t) },
    transitions_to: step.transitions_to.map { |t| serialize_transition(t) },
    # Feature-detect compliance support; default to compliant/1.0 otherwise.
    brand_compliant: step.respond_to?(:brand_compliant?) ? step.brand_compliant? : true,
    compliance_score: step.respond_to?(:quick_compliance_score) ? step.quick_compliance_score : 1.0
  }
end

# Compact JSON shape for a step transition (safe-navigates step names in
# case an endpoint record is missing).
def serialize_transition(transition)
  {
    id: transition.id,
    from_step_id: transition.from_step_id,
    to_step_id: transition.to_step_id,
    from_step_name: transition.from_step&.name,
    to_step_name: transition.to_step&.name,
    transition_type: transition.transition_type,
    conditions: transition.conditions,
    priority: transition.priority,
    metadata: transition.metadata
  }
end
-
end
-
2
class JourneySuggestionsController < ApplicationController
-
2
before_action :set_journey
-
2
before_action :set_current_step, only: [:index, :for_step]
-
2
before_action :authorize_journey_access
-
-
# GET /journeys/:journey_id/suggestions
# Generates AI step suggestions for the journey (optionally anchored at
# @current_step) and returns them with feedback insights. Errors are
# logged and mapped to a 500 JSON envelope; details only leak in dev.
def index
  filters = build_filters_from_params

  begin
    engine = JourneySuggestionEngine.new(
      journey: @journey,
      user: current_user,
      current_step: @current_step,
      provider: suggestion_provider
    )

    @suggestions = engine.generate_suggestions(filters)
    @feedback_insights = engine.get_feedback_insights

    respond_to do |format|
      format.json {
        render json: {
          success: true,
          data: {
            suggestions: @suggestions,
            feedback_insights: @feedback_insights,
            journey_context: journey_context_summary,
            filters_applied: filters,
            provider: suggestion_provider,
            # Whether this request would have been served from cache.
            cached: Rails.cache.exist?(cache_key_for_request)
          },
          meta: {
            total_suggestions: @suggestions.length,
            generated_at: Time.current,
            expires_at: 1.hour.from_now
          }
        }
      }
      format.html { render :index }
    end
  rescue => e
    Rails.logger.error "Suggestion generation failed: #{e.message}"
    Rails.logger.error e.backtrace.join("\n")

    render json: {
      success: false,
      error: {
        message: "Failed to generate suggestions",
        details: Rails.env.development? ? e.message : "Internal server error"
      }
    }, status: :internal_server_error
  end
end
-
-
# GET /journeys/:journey_id/suggestions/for_stage/:stage
# Stage-scoped suggestions. Rejects unknown stages with 400 before
# touching the engine.
def for_stage
  stage = params[:stage]

  unless Journey::STAGES.include?(stage)
    return render json: {
      success: false,
      error: { message: "Invalid stage: #{stage}" }
    }, status: :bad_request
  end

  filters = build_filters_from_params.merge(stage: stage)

  begin
    engine = JourneySuggestionEngine.new(
      journey: @journey,
      user: current_user,
      provider: suggestion_provider
    )

    @suggestions = engine.suggest_for_stage(stage, filters)

    render json: {
      success: true,
      data: {
        suggestions: @suggestions,
        stage: stage,
        filters_applied: filters,
        provider: suggestion_provider
      },
      meta: {
        total_suggestions: @suggestions.length,
        generated_at: Time.current
      }
    }
  rescue => e
    Rails.logger.error "Stage suggestion generation failed: #{e.message}"

    render json: {
      success: false,
      error: {
        message: "Failed to generate stage suggestions",
        details: Rails.env.development? ? e.message : "Internal server error"
      }
    }, status: :internal_server_error
  end
end
-
-
# GET /journeys/:journey_id/suggestions/for_step/:step_id
# Suggestions anchored at a specific step; 404 JSON when the step is not
# part of this journey.
def for_step
  step = @journey.journey_steps.find(params[:step_id])
  filters = build_filters_from_params

  begin
    engine = JourneySuggestionEngine.new(
      journey: @journey,
      user: current_user,
      current_step: step,
      provider: suggestion_provider
    )

    @suggestions = engine.generate_suggestions(filters)

    render json: {
      success: true,
      data: {
        suggestions: @suggestions,
        current_step: step.as_json(only: [:id, :name, :stage, :content_type, :channel]),
        filters_applied: filters,
        provider: suggestion_provider
      },
      meta: {
        total_suggestions: @suggestions.length,
        generated_at: Time.current
      }
    }
  rescue ActiveRecord::RecordNotFound
    render json: {
      success: false,
      error: { message: "Journey step not found" }
    }, status: :not_found
  rescue => e
    Rails.logger.error "Step suggestion generation failed: #{e.message}"

    render json: {
      success: false,
      error: {
        message: "Failed to generate step suggestions",
        details: Rails.env.development? ? e.message : "Internal server error"
      }
    }, status: :internal_server_error
  end
end
-
-
# POST /journeys/:journey_id/suggestions/feedback
# Records user feedback (rating / selection / context) about a generated
# suggestion so the engine can learn. 201 on success, 422 with model
# errors otherwise.
def create_feedback
  suggestion_data = params.require(:suggestion)
  feedback_params = params.require(:feedback)

  begin
    engine = JourneySuggestionEngine.new(
      journey: @journey,
      user: current_user,
      current_step: @current_step,
      provider: suggestion_provider
    )

    feedback = engine.record_feedback(
      suggestion_data.to_h,
      feedback_params[:feedback_type],
      rating: feedback_params[:rating],
      selected: feedback_params[:selected],
      context: feedback_params[:context]
    )

    if feedback.persisted?
      render json: {
        success: true,
        data: {
          feedback_id: feedback.id,
          message: "Feedback recorded successfully"
        }
      }, status: :created
    else
      render json: {
        success: false,
        error: {
          message: "Failed to record feedback",
          details: feedback.errors.full_messages
        }
      }, status: :unprocessable_entity
    end
  rescue => e
    Rails.logger.error "Feedback recording failed: #{e.message}"

    render json: {
      success: false,
      error: {
        message: "Failed to record feedback",
        details: Rails.env.development? ? e.message : "Internal server error"
      }
    }, status: :internal_server_error
  end
end
-
-
# GET /journeys/:journey_id/suggestions/insights
# Returns the 10 most recent active insights plus aggregated feedback
# analytics and suggestion performance for this journey.
def insights
  @insights = @journey.journey_insights
                      .active
                      .order(calculated_at: :desc)
                      .limit(10)

  @feedback_analytics = calculate_feedback_analytics
  @suggestion_performance = calculate_suggestion_performance

  respond_to do |format|
    format.json {
      render json: {
        success: true,
        data: {
          insights: @insights.map(&:to_summary),
          feedback_analytics: @feedback_analytics,
          suggestion_performance: @suggestion_performance,
          journey_summary: journey_context_summary
        },
        meta: {
          total_insights: @insights.length,
          generated_at: Time.current
        }
      }
    }
    format.html { render :insights }
  end
end
-
-
# GET /journeys/:journey_id/suggestions/analytics
# Suggestion analytics over a bounded window (7/30/90 days; anything
# else falls back to 30).
def analytics
  date_range = params[:date_range] || '30_days'
  days = case date_range
         when '7_days' then 7
         when '30_days' then 30
         when '90_days' then 90
         else 30
         end

  @analytics = {
    feedback_trends: calculate_feedback_trends(days),
    selection_rates: calculate_selection_rates(days),
    performance_by_type: calculate_performance_by_type(days),
    ai_provider_comparison: calculate_provider_comparison(days),
    improvement_opportunities: identify_improvement_opportunities
  }

  render json: {
    success: true,
    data: @analytics,
    meta: {
      date_range: date_range,
      days_analyzed: days,
      generated_at: Time.current
    }
  }
end
-
-
# DELETE /journeys/:journey_id/suggestions/cache
# Evicts all cached suggestion payloads for this journey.
# NOTE(review): delete_matched is not supported by every cache store
# (e.g. memcached) — confirm the configured store supports it.
def clear_cache
  cache_pattern = "journey_suggestions:#{@journey.id}:*"
  Rails.cache.delete_matched(cache_pattern)

  render json: {
    success: true,
    message: "Cache cleared for journey suggestions"
  }
end
-
-
2
private

# Loads the journey scoped to the signed-in user; renders a 404 JSON
# envelope when it is missing or owned by someone else.
def set_journey
  @journey = current_user.journeys.find(params[:journey_id])
rescue ActiveRecord::RecordNotFound
  render json: {
    success: false,
    error: { message: "Journey not found" }
  }, status: :not_found
end

# Optionally loads the anchor step; a missing step is tolerated and
# simply clears the step context.
def set_current_step
  return unless params[:current_step_id]

  @current_step = @journey.journey_steps.find(params[:current_step_id])
rescue ActiveRecord::RecordNotFound
  @current_step = nil
end

# Defense-in-depth ownership check (set_journey already scopes to
# current_user); 403 JSON on mismatch.
def authorize_journey_access
  unless @journey && @journey.user == current_user
    render json: {
      success: false,
      error: { message: "Unauthorized access to journey" }
    }, status: :forbidden
  end
end
-
-
2
# Collects the optional suggestion filters supplied in the query string.
# Only keys the caller actually provided end up in the returned hash;
# numeric filters are coerced explicitly.
def build_filters_from_params
  filters = {}

  # String-valued filters pass through unchanged.
  %i[stage content_type channel].each do |key|
    filters[key] = params[key] if params[key].present?
  end

  # Numeric filters get explicit coercion.
  filters[:max_suggestions] = params[:max_suggestions].to_i if params[:max_suggestions].present?
  filters[:min_confidence] = params[:min_confidence].to_f if params[:min_confidence].present?

  filters
end
-
-
2
# Resolves params[:provider] (default 'openai') to a registered provider
# symbol, or nil when the requested provider is unknown (callers pass
# nil through to the engine). Matches the request string against the
# registered keys instead of calling #to_sym on raw user input, which is
# the safer idiom for untrusted strings.
def suggestion_provider
  requested = (params[:provider] || 'openai').to_s
  JourneySuggestionEngine::PROVIDERS.keys.find { |key| key.to_s == requested }
end
-
-
2
# Compact journey snapshot embedded in suggestion/insight responses.
def journey_context_summary
  {
    id: @journey.id,
    name: @journey.name,
    status: @journey.status,
    campaign_type: @journey.campaign_type,
    total_steps: @journey.total_steps,
    stages_coverage: @journey.steps_by_stage,
    # nil when no anchor step was supplied for this request.
    current_step: @current_step&.as_json(only: [:id, :name, :stage, :position])
  }
end
-
-
2
# Aggregated feedback stats for the journey; {} when no feedback exists.
def calculate_feedback_analytics
  return {} unless @journey.suggestion_feedbacks.any?

  {
    average_ratings: @journey.suggestion_feedbacks.average_rating_by_type,
    total_feedback_count: @journey.suggestion_feedbacks.count,
    selection_rate: calculate_overall_selection_rate,
    feedback_distribution: @journey.suggestion_feedbacks.group(:feedback_type).count,
    recent_trends: @journey.suggestion_feedbacks.feedback_trends(7)
  }
end

# Performance breakdowns (content type, stage, top suggestions,
# per-provider) computed from all recorded feedback.
def calculate_suggestion_performance
  feedbacks = @journey.suggestion_feedbacks.includes(:journey_step)

  {
    top_performing_content_types: feedbacks.selection_rate_by_content_type,
    top_performing_stages: feedbacks.selection_rate_by_stage,
    most_selected_suggestions: feedbacks.top_performing_suggestions(5),
    provider_performance: calculate_provider_feedback_performance
  }
end
-
-
2
# Percentage (0-100, rounded to two decimals) of all recorded feedbacks
# whose suggestion was actually selected. Returns 0 with no feedback.
def calculate_overall_selection_rate
  total = @journey.suggestion_feedbacks.count
  return 0 if total.zero?

  chosen = @journey.suggestion_feedbacks.selected.count
  (chosen.to_f / total * 100).round(2)
end
-
-
2
# Daily average rating per feedback type over the trailing window.
def calculate_feedback_trends(days)
  @journey.suggestion_feedbacks
          .where('created_at >= ?', days.days.ago)
          .group_by_day(:created_at)
          .group(:feedback_type)
          .average(:rating)
end

# Selection rates (overall / by content type / by stage) over the
# trailing window.
def calculate_selection_rates(days)
  feedbacks = @journey.suggestion_feedbacks.where('created_at >= ?', days.days.ago)

  {
    overall: calculate_selection_rate_for_feedbacks(feedbacks),
    by_content_type: feedbacks.selection_rate_by_content_type,
    by_stage: feedbacks.selection_rate_by_stage
  }
end

# Per-feedback-type counts and average rating over the trailing window.
def calculate_performance_by_type(days)
  feedbacks = @journey.suggestion_feedbacks.where('created_at >= ?', days.days.ago)

  JourneySuggestionEngine::FEEDBACK_TYPES.map do |feedback_type|
    type_feedbacks = feedbacks.by_feedback_type(feedback_type)
    {
      feedback_type: feedback_type,
      # nil-safe: average returns nil when there are no rated rows.
      average_rating: type_feedbacks.average(:rating)&.round(2),
      total_count: type_feedbacks.count,
      positive_count: type_feedbacks.positive.count,
      negative_count: type_feedbacks.negative.count
    }
  end
end
-
-
2
# Per-AI-provider stats over the trailing window.
# NOTE(review): average_rating divides the sum of non-nil ratings by the
# TOTAL feedback count, so unrated feedbacks drag the average down —
# confirm this is intended (the sibling provider-performance helper does
# the same).
def calculate_provider_comparison(days)
  feedbacks = @journey.suggestion_feedbacks.where('created_at >= ?', days.days.ago)

  provider_data = {}

  feedbacks.group_by { |f| f.ai_provider }.each do |provider, provider_feedbacks|
    provider_data[provider] = {
      total_suggestions: provider_feedbacks.count,
      average_rating: provider_feedbacks.map(&:rating).compact.sum.to_f / provider_feedbacks.count,
      selection_rate: calculate_selection_rate_for_feedbacks(provider_feedbacks),
      response_time: nil # Would be tracked separately
    }
  end

  provider_data
end
-
-
2
# Builds a list of actionable opportunities: content types rated below
# 3.0 on average, and journey stages holding less than 10% of the steps.
def identify_improvement_opportunities
  opportunities = []

  # Low-rated content types
  low_performing_content = @journey.suggestion_feedbacks
                                   .joins(:journey_step)
                                   .group('journey_steps.content_type')
                                   .having('AVG(rating) < ?', 3.0)
                                   .average(:rating)

  low_performing_content.each do |content_type, avg_rating|
    opportunities << {
      type: 'content_improvement',
      content_type: content_type,
      current_rating: avg_rating.round(2),
      recommendation: "Improve #{content_type} suggestions - currently underperforming"
    }
  end

  # Underrepresented stages
  stage_coverage = @journey.steps_by_stage
  total_steps = @journey.total_steps

  Journey::STAGES.each do |stage|
    stage_count = stage_coverage[stage] || 0
    if stage_count < (total_steps * 0.1) # Less than 10% representation
      opportunities << {
        type: 'stage_coverage',
        stage: stage,
        current_count: stage_count,
        recommendation: "Consider adding more #{stage} stage steps to balance the journey"
      }
    end
  end

  opportunities
end
-
-
2
# Count / average rating / selection rate keyed by AI provider, computed
# over all of this journey's feedback (loaded into memory by group_by).
# NOTE(review): avg_rating divides by the total feedback count, not the
# rated count — unrated feedbacks lower the average; confirm intended.
def calculate_provider_feedback_performance
  @journey.suggestion_feedbacks
          .group_by { |f| f.ai_provider }
          .transform_values do |feedbacks|
    {
      count: feedbacks.length,
      avg_rating: feedbacks.map(&:rating).compact.sum.to_f / feedbacks.length,
      selection_rate: calculate_selection_rate_for_feedbacks(feedbacks)
    }
  end
end
-
-
2
# Selection rate (%) for an arbitrary collection of feedback records,
# rounded to two decimals; 0 when the collection is empty.
def calculate_selection_rate_for_feedbacks(feedbacks)
  total = feedbacks.length
  return 0 if total.zero?

  picked = feedbacks.count(&:selected?)
  (picked.to_f / total * 100).round(2)
end
-
-
2
# Deterministic cache key for the current suggestion request. Varies by
# journey (and its updated_at, so edits invalidate), anchor step, user,
# provider and an MD5 digest of the filter hash.
def cache_key_for_request
  filters = build_filters_from_params
  key_parts = [
    "journey_suggestions",
    @journey.id,
    @journey.updated_at.to_i,
    @current_step&.id,
    current_user.id,
    suggestion_provider,
    # Digest keeps the key short regardless of filter contents.
    Digest::MD5.hexdigest(filters.to_json)
  ]

  key_parts.join(":")
end
-
end
-
class JourneyTemplatesController < ApplicationController
-
include Authentication
-
include ActivityTracker
-
-
before_action :set_journey_template, only: [:show, :edit, :update, :destroy, :clone, :use_template, :builder, :builder_react]
-
before_action :ensure_user_can_access_template, only: [:show, :edit, :update, :destroy, :clone, :use_template, :builder, :builder_react]
-
-
# Lists active templates with category/campaign-type filters, free-text
# search and sorting.
# NOTE(review): ILIKE is PostgreSQL-specific (the journeys controller
# uses LIKE) — confirm the app only targets Postgres.
def index
  @templates = policy_scope(JourneyTemplate).active.includes(:journeys)

  # Filter by category if specified
  @templates = @templates.by_category(params[:category]) if params[:category].present?

  # Filter by campaign type if specified
  @templates = @templates.by_campaign_type(params[:campaign_type]) if params[:campaign_type].present?

  # Search by name or description
  if params[:search].present?
    @templates = @templates.where(
      "name ILIKE ? OR description ILIKE ?",
      "%#{params[:search]}%", "%#{params[:search]}%"
    )
  end

  # Sort templates
  case params[:sort]
  when 'popular'
    @templates = @templates.popular
  when 'recent'
    @templates = @templates.recent
  else
    @templates = @templates.order(:name)
  end

  @categories = JourneyTemplate::CATEGORIES
  @campaign_types = Journey::CAMPAIGN_TYPES

  # Track activity
  track_activity('viewed_journey_templates', { count: @templates.count })
end
-
-
# Shows a template with derived preview data (steps, stages, channels,
# content types) and records the view.
def show
  @preview_steps = @template.preview_steps
  @stages_covered = @template.stages_covered
  @channels_used = @template.channels_used
  @content_types = @template.content_types_included

  # Track activity
  track_activity('viewed_journey_template', {
    template_id: @template.id,
    template_name: @template.name
  })
end

# Blank template form; authorization happens on the new instance.
def new
  @template = JourneyTemplate.new
  authorize @template
end
-
-
# Creates a template from strong params; renders the form again (HTML)
# or model errors (JSON) on validation failure.
def create
  @template = JourneyTemplate.new(template_params)
  authorize @template

  if @template.save
    # Track activity
    track_activity('created_journey_template', {
      template_id: @template.id,
      template_name: @template.name,
      category: @template.category
    })

    respond_to do |format|
      format.html { redirect_to @template, notice: 'Journey template was successfully created.' }
      format.json { render json: @template, status: :created }
    end
  else
    respond_to do |format|
      format.html { render :new, status: :unprocessable_entity }
      format.json { render json: { errors: @template.errors }, status: :unprocessable_entity }
    end
  end
end
-
-
# Edit form; @template is loaded/authorized by before_actions.
def edit
end

# Updates the template and records which attributes changed.
def update
  if @template.update(template_params)
    # Track activity
    track_activity('updated_journey_template', {
      template_id: @template.id,
      template_name: @template.name,
      changes: @template.saved_changes.keys
    })

    respond_to do |format|
      format.html { redirect_to @template, notice: 'Journey template was successfully updated.' }
      format.json { render json: @template }
    end
  else
    respond_to do |format|
      format.html { render :edit, status: :unprocessable_entity }
      format.json { render json: { errors: @template.errors }, status: :unprocessable_entity }
    end
  end
end
-
-
# Soft delete: templates are deactivated (is_active = false), never
# destroyed, so journeys built from them keep their reference.
def destroy
  template_name = @template.name
  @template.update!(is_active: false)

  # Track activity
  track_activity('deactivated_journey_template', {
    template_id: @template.id,
    template_name: template_name
  })

  redirect_to journey_templates_path, notice: 'Journey template was deactivated.'
end
-
-
# Clones the template as a fresh, active copy ("(Copy)" suffix, usage
# counter reset) and drops the user into the edit form.
def clone
  new_template = @template.dup
  new_template.name = "#{@template.name} (Copy)"
  new_template.usage_count = 0
  new_template.is_active = true

  if new_template.save
    # Track activity
    track_activity('cloned_journey_template', {
      original_template_id: @template.id,
      new_template_id: new_template.id,
      template_name: new_template.name
    })

    redirect_to edit_journey_template_path(new_template),
                notice: 'Template cloned successfully. You can now customize it.'
  else
    redirect_to @template, alert: 'Failed to clone template.'
  end
end
-
-
# Instantiates a journey for the current user from this template,
# applying any overrides from journey_params_for_template.
def use_template
  journey = @template.create_journey_for_user(
    current_user,
    journey_params_for_template
  )

  if journey.persisted?
    # Track activity
    track_activity('used_journey_template', {
      template_id: @template.id,
      template_name: @template.name,
      journey_id: journey.id,
      journey_name: journey.name
    })

    redirect_to journey_path(journey),
                notice: 'Journey created from template successfully!'
  else
    redirect_to @template,
                alert: "Failed to create journey: #{journey.errors.full_messages.join(', ')}"
  end
end
-
-
# Visual journey builder interface (server-rendered variant).
def builder
  # ||= keeps a template provided by set_journey_template ('new' id).
  @template ||= JourneyTemplate.new
  @existing_steps = @template.template_data&.dig('steps') || []
  @stages = ['awareness', 'consideration', 'conversion', 'retention']
  @step_types = JourneyStep::STEP_TYPES
end

# React-based visual journey builder interface.
def builder_react
  @template ||= JourneyTemplate.new

  # Prepare data for React component (defaults keep the props non-nil
  # for a brand-new template).
  @journey_data = {
    id: @template.id,
    name: @template.name || 'New Journey',
    description: @template.description || '',
    steps: @template.steps_data || [],
    connections: @template.connections_data || [],
    status: @template.published? ? 'published' : 'draft'
  }
end
-
-
private

# Loads the template; the literal id 'new' yields an unsaved instance so
# the builder routes can share the member actions.
def set_journey_template
  if params[:id] == 'new'
    @template = JourneyTemplate.new
  else
    @template = JourneyTemplate.find(params[:id])
  end
end

# Pundit authorization for the loaded/new template.
def ensure_user_can_access_template
  authorize @template
end

# Strong parameters for template create/update.
def template_params
  params.require(:journey_template).permit(
    :name, :description, :category, :campaign_type, :difficulty_level,
    :estimated_duration_days, :is_active, :template_data, :status,
    steps_data: [], connections_data: []
  )
end

# Optional overrides forwarded when building a journey from a template.
def journey_params_for_template
  params.permit(:name, :description, :target_audience, :goals, :brand_id)
end
-
end
-
class JourneysController < ApplicationController
-
include Authentication
-
include ActivityTracker
-
-
before_action :set_journey, only: [:show, :edit, :update, :destroy, :duplicate, :publish, :archive, :builder]
-
before_action :ensure_user_can_access_journey, only: [:show, :edit, :update, :destroy, :duplicate, :publish, :archive, :builder]
-
-
# GET /journeys
# Lists accessible journeys with filters, search, sorting and Kaminari
# pagination (12 per page by default).
def index
  @journeys = policy_scope(Journey)

  # Apply filters
  @journeys = @journeys.where(status: params[:status]) if params[:status].present?
  @journeys = @journeys.where(campaign_type: params[:campaign_type]) if params[:campaign_type].present?
  @journeys = @journeys.joins(:campaign).where(campaigns: { id: params[:campaign_id] }) if params[:campaign_id].present?

  # Apply search
  # NOTE(review): % and _ in params[:search] act as LIKE wildcards (not
  # escaped) — broadens matches but is not an injection risk here.
  if params[:search].present?
    @journeys = @journeys.where("name LIKE ? OR description LIKE ?",
                                "%#{params[:search]}%", "%#{params[:search]}%")
  end

  # Apply sorting
  case params[:sort_by]
  when 'name'
    @journeys = @journeys.order(:name)
  when 'created_at'
    @journeys = @journeys.order(:created_at)
  when 'status'
    @journeys = @journeys.order(:status)
  else
    @journeys = @journeys.order(updated_at: :desc)
  end

  @journeys = @journeys.includes(:campaign, :journey_steps, :user)
                       .page(params[:page])
                       .per(params[:per_page] || 12)

  # Track activity
  log_custom_activity('viewed_journeys_list', { count: @journeys.total_count })

  respond_to do |format|
    format.html
    format.json { render json: serialize_journeys_for_json(@journeys) }
  end
end
-
-
# GET /journeys/:id
# Journey detail with ordered steps, 30-day analytics and the latest
# performance score.
def show
  @journey_steps = @journey.journey_steps.includes(:transitions_from, :transitions_to).by_position
  @campaign = @journey.campaign
  @analytics_summary = @journey.analytics_summary(30)
  @performance_score = @journey.latest_performance_score

  # Track activity
  log_custom_activity('viewed_journey', { journey_id: @journey.id, journey_name: @journey.name })

  respond_to do |format|
    format.html
    format.json { render json: serialize_journey_for_json(@journey) }
  end
end

# GET /journeys/new
# Blank journey form; optionally pre-filled from a template.
def new
  @journey = current_user.journeys.build
  @campaigns = current_user.campaigns.active
  @brands = current_user.brands

  # Set defaults from template if provided
  if params[:template_id].present?
    @template = JourneyTemplate.find(params[:template_id])
    @journey.name = @template.name
    @journey.description = @template.description
    @journey.campaign_type = @template.campaign_type
  end

  authorize @journey

  respond_to do |format|
    format.html
    format.json { render json: { journey: serialize_journey_for_json(@journey) } }
  end
end
-
-
# POST /journeys
# Creates a journey owned by the current user; reloads the form
# collections before re-rendering on validation failure.
def create
  @journey = current_user.journeys.build(journey_params)
  authorize @journey

  respond_to do |format|
    if @journey.save
      # Track activity
      log_custom_activity('created_journey', {
        journey_id: @journey.id,
        journey_name: @journey.name,
        campaign_type: @journey.campaign_type
      })

      format.html { redirect_to @journey, notice: 'Journey was successfully created.' }
      format.json { render json: serialize_journey_for_json(@journey), status: :created }
    else
      @campaigns = current_user.campaigns.active
      @brands = current_user.brands

      format.html { render :new, status: :unprocessable_entity }
      format.json { render json: { errors: @journey.errors.as_json }, status: :unprocessable_entity }
    end
  end
end
-
-
# GET /journeys/:id/edit
# Edit form with the select-box collections.
def edit
  @campaigns = current_user.campaigns.active
  @brands = current_user.brands

  respond_to do |format|
    format.html
    format.json { render json: serialize_journey_for_json(@journey) }
  end
end

# PATCH/PUT /journeys/:id
# Updates the journey, recording which attributes changed.
def update
  respond_to do |format|
    if @journey.update(journey_params)
      # Track activity
      log_custom_activity('updated_journey', {
        journey_id: @journey.id,
        journey_name: @journey.name,
        changes: @journey.saved_changes.keys
      })

      format.html { redirect_to @journey, notice: 'Journey was successfully updated.' }
      format.json { render json: serialize_journey_for_json(@journey) }
    else
      @campaigns = current_user.campaigns.active
      @brands = current_user.brands

      format.html { render :edit, status: :unprocessable_entity }
      format.json { render json: { errors: @journey.errors.as_json }, status: :unprocessable_entity }
    end
  end
end
-
-
# DELETE /journeys/:id
# Hard delete (destroy! raises on failure); the name is captured first
# so the audit entry can reference the deleted record.
def destroy
  journey_name = @journey.name
  @journey.destroy!

  # Track activity
  log_custom_activity('deleted_journey', {
    journey_name: journey_name,
    journey_id: params[:id]
  })

  respond_to do |format|
    format.html { redirect_to journeys_path, notice: 'Journey was successfully deleted.' }
    format.json { render json: { message: 'Journey was successfully deleted.' } }
  end
end
-
-
# POST /journeys/:id/duplicate
# Delegates copying to Journey#duplicate; any failure is surfaced as an
# alert / 422 instead of raising.
def duplicate
  begin
    @new_journey = @journey.duplicate

    # Track activity
    log_custom_activity('duplicated_journey', {
      original_journey_id: @journey.id,
      new_journey_id: @new_journey.id,
      journey_name: @new_journey.name
    })

    respond_to do |format|
      format.html { redirect_to @new_journey, notice: 'Journey was successfully duplicated.' }
      format.json { render json: serialize_journey_for_json(@new_journey), status: :created }
    end
  rescue => e
    respond_to do |format|
      format.html { redirect_to @journey, alert: "Failed to duplicate journey: #{e.message}" }
      format.json { render json: { error: "Failed to duplicate journey: #{e.message}" }, status: :unprocessable_entity }
    end
  end
end
-
-
# POST /journeys/:id/publish
# State transition via Journey#publish!; model validation errors come
# back as 422 JSON / HTML alert.
def publish
  respond_to do |format|
    if @journey.publish!
      # Track activity
      log_custom_activity('published_journey', {
        journey_id: @journey.id,
        journey_name: @journey.name
      })

      format.html { redirect_to @journey, notice: 'Journey was successfully published.' }
      format.json { render json: serialize_journey_for_json(@journey) }
    else
      format.html { redirect_to @journey, alert: 'Failed to publish journey.' }
      format.json { render json: { errors: @journey.errors.as_json }, status: :unprocessable_entity }
    end
  end
end

# POST /journeys/:id/archive
# Mirrors #publish for the archive transition.
def archive
  respond_to do |format|
    if @journey.archive!
      # Track activity
      log_custom_activity('archived_journey', {
        journey_id: @journey.id,
        journey_name: @journey.name
      })

      format.html { redirect_to @journey, notice: 'Journey was successfully archived.' }
      format.json { render json: serialize_journey_for_json(@journey) }
    else
      format.html { redirect_to @journey, alert: 'Failed to archive journey.' }
      format.json { render json: { errors: @journey.errors.as_json }, status: :unprocessable_entity }
    end
  end
end
-
-
# GET /journeys/:id/builder
# Visual builder view; JSON variant returns the canvas-oriented
# serialization used by the front-end.
def builder
  @journey_steps = @journey.journey_steps.includes(:transitions_from, :transitions_to).by_position

  # Track activity
  log_custom_activity('opened_journey_builder', {
    journey_id: @journey.id,
    journey_name: @journey.name
  })

  respond_to do |format|
    format.html
    format.json { render json: serialize_journey_for_builder(@journey) }
  end
end
-
-
private

# Loads the journey by id; authorization is handled separately by
# ensure_user_can_access_journey.
def set_journey
  @journey = Journey.find(params[:id])
end

# Pundit authorization for the loaded journey.
def ensure_user_can_access_journey
  authorize @journey
end

# Strong parameters, normalizing a newline-separated goals string into
# an array of stripped, non-blank lines (strip also removes any \r from
# browser textareas).
def journey_params
  permitted_params = params.require(:journey).permit(
    :name, :description, :campaign_type, :target_audience, :status,
    :campaign_id, :brand_id, :goals, metadata: {}, settings: {}
  )

  # Handle goals conversion from string to array
  if permitted_params[:goals].is_a?(String)
    permitted_params[:goals] = permitted_params[:goals].split("\n").map(&:strip).reject(&:blank?)
  end

  permitted_params
end
-
-
# Paginated index payload: journey summaries plus Kaminari pagination
# metadata (expects a paginated relation).
def serialize_journeys_for_json(journeys)
  {
    journeys: journeys.map { |journey| serialize_journey_summary(journey) },
    pagination: {
      current_page: journeys.current_page,
      total_pages: journeys.total_pages,
      total_count: journeys.total_count,
      per_page: journeys.limit_value
    }
  }
end
-
-
# Full journey representation for show/create/update JSON responses,
# embedding campaign/brand summaries when present.
def serialize_journey_for_json(journey)
  {
    id: journey.id,
    name: journey.name,
    description: journey.description,
    status: journey.status,
    campaign_type: journey.campaign_type,
    target_audience: journey.target_audience,
    goals: journey.goals,
    metadata: journey.metadata,
    settings: journey.settings,
    campaign_id: journey.campaign_id,
    brand_id: journey.brand_id,
    campaign: journey.campaign ? serialize_campaign_summary(journey.campaign) : nil,
    brand: journey.brand ? serialize_brand_summary(journey.brand) : nil,
    step_count: journey.total_steps,
    steps_by_stage: journey.steps_by_stage,
    created_at: journey.created_at,
    updated_at: journey.updated_at,
    published_at: journey.published_at,
    archived_at: journey.archived_at,
    performance_score: journey.latest_performance_score,
    ab_test_status: journey.ab_test_status
  }
end

# Lightweight journey shape for index listings (names instead of nested
# campaign/brand objects).
def serialize_journey_summary(journey)
  {
    id: journey.id,
    name: journey.name,
    description: journey.description,
    status: journey.status,
    campaign_type: journey.campaign_type,
    campaign_id: journey.campaign_id,
    campaign_name: journey.campaign&.name,
    brand_id: journey.brand_id,
    brand_name: journey.brand&.name,
    step_count: journey.total_steps,
    created_at: journey.created_at,
    updated_at: journey.updated_at,
    published_at: journey.published_at,
    performance_score: journey.latest_performance_score
  }
end

# Minimal campaign shape embedded in journey payloads.
def serialize_campaign_summary(campaign)
  {
    id: campaign.id,
    name: campaign.name,
    campaign_type: campaign.campaign_type,
    status: campaign.status
  }
end

# Minimal brand shape embedded in journey payloads.
def serialize_brand_summary(brand)
  {
    id: brand.id,
    name: brand.name,
    industry: brand.industry,
    status: brand.status
  }
end
-
-
# Builder-oriented journey payload: like the full serialization but with
# steps shaped for the visual canvas.
def serialize_journey_for_builder(journey)
  {
    id: journey.id,
    name: journey.name,
    description: journey.description,
    status: journey.status,
    campaign_type: journey.campaign_type,
    target_audience: journey.target_audience,
    goals: journey.goals,
    metadata: journey.metadata,
    settings: journey.settings,
    campaign_id: journey.campaign_id,
    brand_id: journey.brand_id,
    steps: serialize_journey_steps_for_builder(journey.journey_steps.by_position),
    created_at: journey.created_at,
    updated_at: journey.updated_at
  }
end

# Canvas step shape: `position` is the x/y canvas coordinate (stored in
# metadata, with a computed fallback laid out left-to-right), while the
# ordinal list position moves to `step_position`.
def serialize_journey_steps_for_builder(steps)
  steps.map do |step|
    {
      id: step.id,
      name: step.name,
      description: step.description,
      stage: step.stage,
      position: {
        x: step.metadata&.dig('canvas', 'x') || (step.position * 300 + 100),
        y: step.metadata&.dig('canvas', 'y') || 100
      },
      step_position: step.position,
      content_type: step.content_type,
      channel: step.channel,
      duration_days: step.duration_days,
      config: step.config || {},
      conditions: step.conditions || {},
      metadata: step.metadata || {},
      is_entry_point: step.is_entry_point,
      is_exit_point: step.is_exit_point,
      transitions_from: step.transitions_from.map { |t| {
        id: t.id,
        to_step_id: t.to_step_id,
        conditions: t.conditions || {},
        transition_type: t.transition_type
      }},
      transitions_to: step.transitions_to.map { |t| {
        id: t.id,
        from_step_id: t.from_step_id,
        conditions: t.conditions || {},
        transition_type: t.transition_type
      }}
    }
  end
end
-
end
-
class MessagingFrameworksController < ApplicationController
-
before_action :set_brand
-
before_action :set_messaging_framework
-
-
# Shows the brand's messaging framework (HTML view or JSON via
# framework_json).
def show
  respond_to do |format|
    format.html
    format.json { render json: framework_json }
  end
end

# Edit form; @messaging_framework is loaded by a before_action.
def edit
end
-
-
# Full-form update for the messaging framework; JSON clients get the
# refreshed framework payload or 422 with error messages.
def update
  respond_to do |format|
    if @messaging_framework.update(messaging_framework_params)
      format.html { redirect_to brand_messaging_framework_path(@brand), notice: 'Messaging framework was successfully updated.' }
      format.json { render json: { success: true, messaging_framework: framework_json } }
    else
      format.html { render :edit, status: :unprocessable_entity }
      format.json { render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity }
    end
  end
end
-
-
# AJAX Actions for specific updates
# Each endpoint persists exactly one framework attribute straight from
# params and answers with { success: true, <attribute>: ... } or a 422
# carrying the model's error messages.
# NOTE(review): values come from params without permit — confirm the
# attribute columns accept the raw structures safely.

def update_key_messages
  unless @messaging_framework.update(key_messages: params[:key_messages])
    return render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity
  end

  render json: { success: true, key_messages: @messaging_framework.key_messages }
end

def update_value_propositions
  unless @messaging_framework.update(value_propositions: params[:value_propositions])
    return render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity
  end

  render json: { success: true, value_propositions: @messaging_framework.value_propositions }
end

def update_terminology
  unless @messaging_framework.update(terminology: params[:terminology])
    return render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity
  end

  render json: { success: true, terminology: @messaging_framework.terminology }
end

def update_approved_phrases
  unless @messaging_framework.update(approved_phrases: params[:approved_phrases])
    return render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity
  end

  render json: { success: true, approved_phrases: @messaging_framework.approved_phrases }
end

def update_banned_words
  unless @messaging_framework.update(banned_words: params[:banned_words])
    return render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity
  end

  render json: { success: true, banned_words: @messaging_framework.banned_words }
end

def update_tone_attributes
  unless @messaging_framework.update(tone_attributes: params[:tone_attributes])
    return render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity
  end

  render json: { success: true, tone_attributes: @messaging_framework.tone_attributes }
end
-
-
def validate_content
-
content = params[:content]
-
validation_results = {
-
banned_words: @messaging_framework.get_banned_words_in_text(content),
-
contains_banned: @messaging_framework.contains_banned_words?(content),
-
tone_match: analyze_tone_match(content),
-
approved_phrases_used: find_approved_phrases_in_text(content)
-
}
-
render json: validation_results
-
end
-
-
def export
-
respond_to do |format|
-
format.json { render json: @messaging_framework.to_json }
-
format.csv { send_data generate_csv, filename: "messaging-framework-#{@brand.name.parameterize}-#{Date.today}.csv" }
-
end
-
end
-
-
def import
-
if params[:file].present?
-
result = import_framework_data(params[:file])
-
if result[:success]
-
render json: { success: true, message: 'Framework imported successfully' }
-
else
-
render json: { success: false, errors: result[:errors] }, status: :unprocessable_entity
-
end
-
else
-
render json: { success: false, errors: ['No file uploaded'] }, status: :unprocessable_entity
-
end
-
end
-
-
def ai_suggestions
-
content_type = params[:content_type]
-
current_content = params[:current_content]
-
-
suggestions = generate_ai_suggestions(content_type, current_content)
-
render json: { suggestions: suggestions }
-
end
-
-
def reorder_key_messages
-
category = params[:category]
-
ordered_ids = params[:ordered_ids]
-
-
if @messaging_framework.key_messages[category]
-
reordered_messages = ordered_ids.map do |id|
-
@messaging_framework.key_messages[category][id.to_i]
-
end.compact
-
-
@messaging_framework.key_messages[category] = reordered_messages
-
-
if @messaging_framework.save
-
render json: { success: true, key_messages: @messaging_framework.key_messages }
-
else
-
render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity
-
end
-
else
-
render json: { success: false, errors: ['Category not found'] }, status: :not_found
-
end
-
end
-
-
def reorder_value_propositions
-
proposition_type = params[:proposition_type]
-
ordered_ids = params[:ordered_ids]
-
-
if @messaging_framework.value_propositions[proposition_type]
-
reordered_props = ordered_ids.map do |id|
-
@messaging_framework.value_propositions[proposition_type][id.to_i]
-
end.compact
-
-
@messaging_framework.value_propositions[proposition_type] = reordered_props
-
-
if @messaging_framework.save
-
render json: { success: true, value_propositions: @messaging_framework.value_propositions }
-
else
-
render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity
-
end
-
else
-
render json: { success: false, errors: ['Proposition type not found'] }, status: :not_found
-
end
-
end
-
-
def add_key_message
-
category = params[:category]
-
message = params[:message]
-
-
@messaging_framework.key_messages ||= {}
-
@messaging_framework.key_messages[category] ||= []
-
@messaging_framework.key_messages[category] << message
-
-
if @messaging_framework.save
-
render json: { success: true, key_messages: @messaging_framework.key_messages }
-
else
-
render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity
-
end
-
end
-
-
def remove_key_message
-
category = params[:category]
-
index = params[:index].to_i
-
-
if @messaging_framework.key_messages[category]
-
@messaging_framework.key_messages[category].delete_at(index)
-
-
if @messaging_framework.save
-
render json: { success: true, key_messages: @messaging_framework.key_messages }
-
else
-
render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity
-
end
-
else
-
render json: { success: false, errors: ['Category not found'] }, status: :not_found
-
end
-
end
-
-
def add_value_proposition
-
proposition_type = params[:proposition_type]
-
proposition = params[:proposition]
-
-
@messaging_framework.value_propositions ||= {}
-
@messaging_framework.value_propositions[proposition_type] ||= []
-
@messaging_framework.value_propositions[proposition_type] << proposition
-
-
if @messaging_framework.save
-
render json: { success: true, value_propositions: @messaging_framework.value_propositions }
-
else
-
render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity
-
end
-
end
-
-
def remove_value_proposition
-
proposition_type = params[:proposition_type]
-
index = params[:index].to_i
-
-
if @messaging_framework.value_propositions[proposition_type]
-
@messaging_framework.value_propositions[proposition_type].delete_at(index)
-
-
if @messaging_framework.save
-
render json: { success: true, value_propositions: @messaging_framework.value_propositions }
-
else
-
render json: { success: false, errors: @messaging_framework.errors.full_messages }, status: :unprocessable_entity
-
end
-
else
-
render json: { success: false, errors: ['Proposition type not found'] }, status: :not_found
-
end
-
end
-
-
def search_approved_phrases
-
query = params[:query].to_s.downcase
-
phrases = @messaging_framework.approved_phrases || []
-
-
filtered_phrases = if query.present?
-
phrases.select { |phrase| phrase.downcase.include?(query) }
-
else
-
phrases
-
end
-
-
render json: { phrases: filtered_phrases }
-
end
-
-
private
-
-
def set_brand
-
@brand = current_user.brands.find(params[:brand_id])
-
end
-
-
def set_messaging_framework
-
@messaging_framework = @brand.messaging_framework || @brand.create_messaging_framework!
-
end
-
-
def messaging_framework_params
-
params.require(:messaging_framework).permit(
-
:tagline,
-
:mission_statement,
-
:vision_statement,
-
:active,
-
key_messages: {},
-
value_propositions: {},
-
terminology: {},
-
approved_phrases: [],
-
banned_words: [],
-
tone_attributes: {}
-
)
-
end
-
-
def framework_json
-
{
-
id: @messaging_framework.id,
-
tagline: @messaging_framework.tagline,
-
mission_statement: @messaging_framework.mission_statement,
-
vision_statement: @messaging_framework.vision_statement,
-
key_messages: @messaging_framework.key_messages || {},
-
value_propositions: @messaging_framework.value_propositions || {},
-
terminology: @messaging_framework.terminology || {},
-
approved_phrases: @messaging_framework.approved_phrases || [],
-
banned_words: @messaging_framework.banned_words || [],
-
tone_attributes: @messaging_framework.tone_attributes || {},
-
active: @messaging_framework.active
-
}
-
end
-
-
def analyze_tone_match(content)
-
# Simple tone analysis - in production, this would use NLP
-
tone = @messaging_framework.tone_attributes || {}
-
-
{
-
formality: tone['formality'] || 'neutral',
-
matches_tone: true, # Simplified for now
-
suggestions: []
-
}
-
end
-
-
def find_approved_phrases_in_text(content)
-
return [] unless @messaging_framework.approved_phrases.present?
-
-
@messaging_framework.approved_phrases.select do |phrase|
-
content.downcase.include?(phrase.downcase)
-
end
-
end
-
-
def generate_csv
-
require 'csv'
-
-
CSV.generate(headers: true) do |csv|
-
csv << ['Section', 'Key', 'Value']
-
-
# Export key messages
-
(@messaging_framework.key_messages || {}).each do |category, messages|
-
messages.each { |msg| csv << ['Key Messages', category, msg] }
-
end
-
-
# Export value propositions
-
(@messaging_framework.value_propositions || {}).each do |type, props|
-
props.each { |prop| csv << ['Value Propositions', type, prop] }
-
end
-
-
# Export terminology
-
(@messaging_framework.terminology || {}).each do |term, definition|
-
csv << ['Terminology', term, definition]
-
end
-
-
# Export approved phrases
-
(@messaging_framework.approved_phrases || []).each do |phrase|
-
csv << ['Approved Phrases', '', phrase]
-
end
-
-
# Export banned words
-
(@messaging_framework.banned_words || []).each do |word|
-
csv << ['Banned Words', '', word]
-
end
-
-
# Export tone attributes
-
(@messaging_framework.tone_attributes || {}).each do |attr, value|
-
csv << ['Tone Attributes', attr, value]
-
end
-
end
-
end
-
-
def import_framework_data(file)
-
# Handle JSON import
-
if file.content_type == 'application/json'
-
begin
-
data = JSON.parse(file.read)
-
@messaging_framework.update!(data.slice(*%w[key_messages value_propositions terminology approved_phrases banned_words tone_attributes tagline mission_statement vision_statement]))
-
{ success: true }
-
rescue => e
-
{ success: false, errors: [e.message] }
-
end
-
else
-
{ success: false, errors: ['Unsupported file type. Please upload a JSON file.'] }
-
end
-
end
-
-
def generate_ai_suggestions(content_type, current_content)
-
# In production, this would call your AI service
-
# For now, return sample suggestions
-
case content_type
-
when 'key_messages'
-
[
-
"Focus on customer benefits rather than features",
-
"Include emotional appeal alongside rational arguments",
-
"Ensure consistency with brand voice"
-
]
-
when 'value_propositions'
-
[
-
"Lead with the primary benefit",
-
"Quantify value where possible",
-
"Differentiate from competitors"
-
]
-
when 'tagline'
-
[
-
"Keep it under 7 words for memorability",
-
"Include a unique brand element",
-
"Make it actionable or aspirational"
-
]
-
else
-
["No suggestions available for this content type"]
-
end
-
end
-
end
-
# Handles self-service password resets.
class PasswordsController < ApplicationController
  allow_unauthenticated_access
  before_action :set_user_by_token, only: %i[ edit update ]

  # Throttle reset requests so the endpoint cannot be used for email bombing.
  rate_limit to: 5, within: 1.hour, only: :create, with: -> {
    redirect_to new_password_path, alert: "Too many password reset requests. Please try again later."
  }

  def new
  end

  # Always responds with the same notice whether or not the email matched a
  # user, to avoid leaking which accounts exist.
  def create
    user = User.find_by(email_address: params[:email_address])
    PasswordsMailer.reset(user).deliver_later if user

    redirect_to new_session_path, notice: "Password reset instructions sent (if user with that email address exists)."
  end

  def edit
  end

  def update
    if @user.update(user_params)
      redirect_to new_session_path, notice: "Password has been reset."
    else
      flash.now[:alert] = @user.errors.full_messages.to_sentence
      render :edit, status: :unprocessable_entity
    end
  end

  private

  # Looks up the user from the signed reset token; invalid or expired tokens
  # bounce back to the request form.
  def set_user_by_token
    @user = User.find_by_password_reset_token!(params[:token])
  rescue ActiveSupport::MessageVerifier::InvalidSignature
    redirect_to new_password_path, alert: "Password reset link is invalid or has expired."
  end

  def user_params
    params.permit(:password, :password_confirmation)
  end
end
-
1
# Lets the signed-in user view and edit their own profile.
class ProfilesController < ApplicationController
  before_action :set_user
  before_action :authorize_user

  # Rate limit profile updates to prevent abuse
  rate_limit to: 30, within: 1.hour, only: :update, with: -> {
    redirect_to edit_profile_path, alert: "Too many update attempts. Please try again later."
  }

  def show
  end

  def edit
  end

  def update
    if @user.update(user_params)
      redirect_to profile_path, notice: "Profile updated successfully."
    else
      render :edit, status: :unprocessable_entity
    end
  end

  private

  def set_user
    @user = current_user
  end

  # Users can only view/edit their own profile.
  # NOTE(review): @user is always current_user here, so this guard can never
  # fail today; it only matters if set_user ever loads a different record.
  def authorize_user
    redirect_to root_path, alert: "Not authorized" unless @user == current_user
  end

  def user_params
    params.require(:user).permit(
      :full_name, :bio, :phone_number, :company, :job_title, :timezone,
      :notification_email, :notification_marketing, :notification_product,
      :avatar
    )
  end
end
-
module RailsAdmin
  # Base controller for Rails Admin, wired into the app's audit logging.
  class ApplicationController < ::ApplicationController
    include AdminAuditable

    # Override to ensure we capture Rails Admin specific objects
    before_action :set_auditable_object

    private

    # Resolves the model/record the current admin action operates on so the
    # audit layer can record it.
    def set_auditable_object
      return if params[:model_name].blank?

      @model_name = params[:model_name]
      @abstract_model = RailsAdmin::AbstractModel.new(@model_name)

      if params[:id].present?
        @object = @abstract_model.get(params[:id])
      elsif action_name == "new"
        @object = @abstract_model.model.new
      end
    end

    # Name expected by the audit concern for the acting user.
    def _current_user
      current_user
    end
  end
end
-
1
# Sign-up flow for new users.
class RegistrationsController < ApplicationController
  allow_unauthenticated_access

  # Rate limit registration attempts to prevent abuse
  rate_limit to: 5, within: 1.hour, only: :create, with: -> {
    redirect_to new_registration_path, alert: "Too many registration attempts. Please try again later."
  }

  def new
    @user = User.new
  end

  # Creates the account and signs the user straight in on success.
  def create
    @user = User.new(user_params)

    unless @user.save
      render :new, status: :unprocessable_entity
      return
    end

    start_new_session_for(@user)
    redirect_to root_path, notice: "Welcome! You have successfully signed up."
  end

  private

  def user_params
    params.require(:user).permit(:email_address, :password, :password_confirmation)
  end
end
-
2
require 'ostruct'
-
-
2
# Email/password sign-in and sign-out, with an audit trail of every attempt.
class SessionsController < ApplicationController
  allow_unauthenticated_access only: %i[ new create ]
  rate_limit to: 10, within: 3.minutes, only: :create, with: -> { redirect_to new_session_url, alert: "Try again later." }

  def new
  end

  # Authenticates the user, refusing locked or suspended accounts, and
  # records every attempt (successful or not) via the activity log.
  def create
    user = User.authenticate_by(params.permit(:email_address, :password))

    if user.nil?
      handle_invalid_credentials
    elsif user.locked?
      log_authentication_activity(user, success: false, reason: "account_locked")
      redirect_to new_session_path, alert: "Your account has been locked: #{user.lock_reason}"
    elsif user.suspended?
      log_authentication_activity(user, success: false, reason: "account_suspended")
      redirect_to new_session_path, alert: "Your account has been suspended: #{user.suspension_reason}"
    else
      start_new_session_for(user, remember_me: params[:remember_me] == "1")
      log_authentication_activity(user, success: true)
      redirect_to after_authentication_url
    end
  end

  def destroy
    terminate_session
    redirect_to new_session_path
  end

  private

  # Log failed authentication attempt if we can identify the user.
  def handle_invalid_credentials
    if params[:email_address].present?
      failed_user = User.find_by(email_address: params[:email_address])
      log_authentication_activity(failed_user, success: false, reason: "invalid_credentials") if failed_user
    end

    redirect_to new_session_path, alert: "Try another email address or password."
  end

  # Best-effort audit logging; any failure here is swallowed so that it can
  # never break sign-in itself.
  def log_authentication_activity(user, success:, reason: nil)
    return unless user

    metadata = {
      success: success,
      reason: reason,
      ip_address: request.remote_ip,
      user_agent: request.user_agent
    }.compact

    activity = Activity.log_activity(
      user: user,
      action: "create",
      controller: "sessions",
      request: request,
      response: OpenStruct.new(status: success ? 302 : 401),
      metadata: metadata
    )

    # Check for suspicious activity
    SuspiciousActivityDetector.new(activity).check if activity.persisted?
  rescue => e
    Rails.logger.error "Failed to log authentication activity: #{e.message}"
  end
end
-
# Lets a user review and revoke their own active sessions.
class UserSessionsController < ApplicationController
  before_action :set_session, only: :destroy

  def index
    @sessions = current_user.sessions.active.order(last_active_at: :desc)
    @current_session = Current.session
  end

  def destroy
    if @session == Current.session
      # The active session must be ended via sign-out, not from this list.
      redirect_to user_sessions_path, alert: "You cannot end your current session from here. Use Sign Out instead."
      return
    end

    @session.destroy
    redirect_to user_sessions_path, notice: "Session ended successfully."
  end

  private

  # Scoped to current_user so nobody can end another user's session.
  def set_session
    @session = current_user.sessions.find(params[:id])
  rescue ActiveRecord::RecordNotFound
    head :not_found
  end
end
-
# Read-only user listing and profile pages, authorized through Pundit-style
# policy_scope/authorize helpers.
class UsersController < ApplicationController
  before_action :set_user, only: [:show]

  def index
    @users = policy_scope(User)
    authorize User
  end

  def show
    authorize @user
  end

  private

  def set_user
    @user = User.find(params[:id])
  end
end
-
2
# View helpers for activity views (no helpers defined yet).
module ActivitiesHelper
end
-
2
# View helpers for the API v1 brand-compliance views (no helpers defined yet).
module Api::V1::BrandComplianceHelper
end
-
2
# Application-wide view helpers (no helpers defined yet).
module ApplicationHelper
end
-
2
# View helpers for brand-asset views (no helpers defined yet).
module BrandAssetsHelper
end
-
2
# View helpers for brand-guideline views (no helpers defined yet).
module BrandGuidelinesHelper
end
-
2
# View helpers for brand views (no helpers defined yet).
module BrandsHelper
end
-
2
# View helpers for journey-template views (no helpers defined yet).
module JourneyTemplatesHelper
end
-
2
# View helpers for messaging-framework views (no helpers defined yet).
module MessagingFrameworksHelper
end
-
2
# View helpers for profile views (no helpers defined yet).
module ProfilesHelper
end
-
2
module RailsAdmin
  # Metrics displayed on the Rails Admin dashboard.
  module DashboardHelper
    # Month-over-month percentage change in new user signups.
    def user_growth_percentage
      current_count = User.where(created_at: Date.current.beginning_of_month..Date.current.end_of_month).count
      previous_count = User.where(created_at: 1.month.ago.beginning_of_month..1.month.ago.end_of_month).count

      percentage_change(previous_count, current_count)
    end

    # Day-over-day percentage change in recorded activity.
    def activity_trend_percentage
      current_count = Activity.where(occurred_at: Date.current.beginning_of_day..Date.current.end_of_day).count
      previous_count = Activity.where(occurred_at: 1.day.ago.beginning_of_day..1.day.ago.end_of_day).count

      percentage_change(previous_count, current_count)
    end

    # Traffic-light summary of the last 24 hours.
    #
    # Fixed: the previous revision checked `error_rate > 5` before
    # `error_rate > 10`, so the "critical" branch could never be reached.
    # Critical is now evaluated first.
    def system_health_status
      error_rate = calculate_error_rate(24.hours)
      avg_response_time = calculate_average_response_time(24.hours)

      if error_rate > 10
        { status: "critical", color: "danger", icon: "times-circle" }
      elsif error_rate > 5 || (avg_response_time && avg_response_time > 1.0)
        { status: "warning", color: "warning", icon: "exclamation-triangle" }
      else
        { status: "healthy", color: "success", icon: "check-circle" }
      end
    end

    private

    # Rounded percentage change; 0 when there is no baseline to compare to.
    def percentage_change(previous_count, current_count)
      return 0 if previous_count.zero?

      ((current_count - previous_count).to_f / previous_count * 100).round(2)
    end

    # Percentage of activities in the window with a 4xx/5xx response status.
    def calculate_error_rate(time_window)
      total = Activity.where(occurred_at: time_window.ago..Time.current).count
      return 0 if total.zero?

      errors = Activity.where(response_status: 400..599, occurred_at: time_window.ago..Time.current).count
      (errors.to_f / total * 100).round(2)
    end

    # Mean response time over the window; nil when no timed activity exists.
    def calculate_average_response_time(time_window)
      Activity.where.not(response_time: nil)
              .where(occurred_at: time_window.ago..Time.current)
              .average(:response_time)
    end
  end
end
-
2
# View helpers for registration views (no helpers defined yet).
module RegistrationsHelper
end
-
2
module UserSessionsHelper
  # Maps a raw User-Agent header to a short human-readable browser label.
  # Simple user agent parsing - in production, consider using a gem like 'browser'
  # (note: Chrome UAs also contain "Safari/...", so Chrome is matched first).
  def parse_user_agent(user_agent_string)
    return "Unknown" if user_agent_string.blank?

    case user_agent_string
    when /Chrome\/(\d+)/  then "Chrome #{Regexp.last_match(1)}"
    when /Safari\/(\d+)/  then "Safari"
    when /Firefox\/(\d+)/ then "Firefox #{Regexp.last_match(1)}"
    when /Edge\/(\d+)/    then "Edge #{Regexp.last_match(1)}"
    when /MSIE (\d+)/     then "Internet Explorer #{Regexp.last_match(1)}"
    else
      user_agent_string.truncate(50)
    end
  end
end
-
# Purges activity rows that are older than the configured retention window.
class ActivityCleanupJob < ApplicationJob
  queue_as :low

  def perform
    # Get retention period from configuration
    retention_days = Rails.application.config.activity_tracking.retention_days || 90
    cutoff_date = retention_days.days.ago

    ActivityLogger.log(:info, "Starting activity cleanup", {
      retention_days: retention_days,
      cutoff_date: cutoff_date
    })

    total_deleted = delete_old_activities(cutoff_date)

    # Clean up old user activities (if using the separate model)
    UserActivity.where("performed_at < ?", cutoff_date).delete_all if defined?(UserActivity)

    ActivityLogger.log(:info, "Activity cleanup completed", {
      total_deleted: total_deleted,
      cutoff_date: cutoff_date
    })

    optimize_database_tables
  end

  private

  # Deletes non-suspicious activities older than cutoff_date in chunks of
  # 1000 so the table is never locked for long; returns the total removed.
  def delete_old_activities(cutoff_date)
    total_deleted = 0

    loop do
      deleted_count = Activity
        .where("occurred_at < ?", cutoff_date)
        .where(suspicious: false) # Keep suspicious activities longer
        .limit(1000)
        .delete_all

      total_deleted += deleted_count
      break if deleted_count < 1000

      sleep 0.1 # Small delay to prevent database overload
    end

    total_deleted
  end

  # Reclaims space after the bulk delete; adapter-specific and best-effort.
  def optimize_database_tables
    case ActiveRecord::Base.connection.adapter_name
    when 'PostgreSQL'
      ActiveRecord::Base.connection.execute('VACUUM ANALYZE activities')
    when /SQLite/
      ActiveRecord::Base.connection.execute('VACUUM')
    end
  rescue => e
    Rails.logger.error "Failed to optimize database: #{e.message}"
  end
end
-
# Base class for all background jobs in the application.
class ApplicationJob < ActiveJob::Base
  # Automatically retry jobs that encountered a deadlock
  # retry_on ActiveRecord::Deadlocked

  # Most jobs are safe to ignore if the underlying records are no longer available
  # discard_on ActiveJob::DeserializationError
end
-
# Runs a stored BrandAnalysis through the LLM-backed analysis service and
# kicks off follow-up jobs based on the outcome.
class BrandAnalysisJob < ApplicationJob
  queue_as :low_priority

  retry_on StandardError, wait: :exponentially_longer, attempts: 3

  def perform(analysis_id)
    analysis = BrandAnalysis.find(analysis_id)
    brand = analysis.brand

    # Provider/temperature come from the analysis record's own metadata.
    options = {
      llm_provider: analysis.analysis_data['llm_provider'],
      temperature: analysis.analysis_data['temperature'] || 0.7
    }

    service = Branding::AnalysisService.new(brand, nil, options)

    if service.perform_analysis(analysis)
      Rails.logger.info "Successfully analyzed brand #{brand.id} - Analysis #{analysis.id}"

      # Notify user or trigger follow-up actions.
      # NOTE(review): confirm BrandAnalysisNotificationJob#perform accepts the
      # analysis id / failed flag passed here.
      BrandAnalysisNotificationJob.perform_later(brand, analysis.id)

      # Trigger content generation suggestions if enabled
      ContentSuggestionJob.perform_later(brand, analysis.id) if brand.auto_generate_suggestions?
    else
      Rails.logger.error "Failed to analyze brand #{brand.id} - Analysis #{analysis.id}"

      # Notify user of failure
      BrandAnalysisNotificationJob.perform_later(brand, analysis.id, failed: true)
    end
  rescue ActiveRecord::RecordNotFound => e
    Rails.logger.error "Analysis not found: #{analysis_id} - #{e.message}"
  rescue StandardError => e
    Rails.logger.error "Brand analysis error: #{e.message}\n#{e.backtrace.join("\n")}"

    # Mark analysis as failed if we can
    analysis.mark_as_failed!("Job error: #{e.message}") if defined?(analysis) && analysis

    raise # Re-raise for retry logic
  end
end
-
# Notifies interested parties that a brand analysis finished.
#
# Fixed: callers (BrandAnalysisJob) enqueue this job with
# (brand, analysis_id) and an optional failed: flag, but #perform only
# accepted (brand), raising ArgumentError at execution time. The extra
# parameters are now accepted with backward-compatible defaults.
class BrandAnalysisNotificationJob < ApplicationJob
  queue_as :default

  # @param brand [Brand] the analyzed brand
  # @param analysis_id [Integer, nil] id of the BrandAnalysis record, if known
  # @param failed [Boolean] true when the analysis did not complete successfully
  def perform(brand, analysis_id = nil, failed: false)
    # This would send notification to user about completed analysis
    # For now, we'll just log it
    status = failed ? "failed" : "completed"
    Rails.logger.info "Brand analysis #{status} for #{brand.name} (ID: #{brand.id}, analysis: #{analysis_id.inspect})"

    # In production, you might:
    # - Send an email notification
    # - Create an in-app notification
    # - Broadcast via ActionCable
    # - Update a dashboard metric
  end
end
-
# Post-processes an uploaded brand asset via Branding::AssetProcessor.
class BrandAssetProcessingJob < ApplicationJob
  queue_as :default

  def perform(brand_asset)
    # Nothing to do until a file is actually attached.
    return unless brand_asset.file.attached?

    processor = Branding::AssetProcessor.new(brand_asset)

    if processor.process
      Rails.logger.info "Successfully processed brand asset #{brand_asset.id}"

      # Trigger brand analysis if this is the first processed asset
      # NOTE(review): BrandAnalysisJob#perform expects an analysis id (it calls
      # BrandAnalysis.find on its argument), but a Brand record is enqueued
      # here — verify which argument this job should actually receive.
      if brand_asset.brand.brand_assets.processed.count == 1
        BrandAnalysisJob.perform_later(brand_asset.brand)
      end
    else
      Rails.logger.error "Failed to process brand asset #{brand_asset.id}: #{processor.errors.join(', ')}"
    end
  end
end
-
# Runs a full brand-compliance check in the background, optionally
# broadcasting real-time progress events, persisting the results, and
# notifying users. The broadcast → check → store → broadcast → notify
# ordering is deliberate and should be preserved.
class BrandComplianceJob < ApplicationJob
  queue_as :default

  # Retry configuration for transient failures
  retry_on StandardError, wait: :exponentially_longer, attempts: 3

  # Discard jobs with permanent failures after retries
  discard_on ActiveJob::DeserializationError

  # @param brand_id [Integer] brand whose guidelines are checked
  # @param content [String] the content to validate
  # @param content_type [String] drives validator selection (see determine_validators)
  # @param options [Hash] feature flags read here: :broadcast_events,
  #   :session_id, :user_id, :store_results, :notify, :notify_on_success,
  #   :notify_owner, :notify_users, :notify_team, :in_app_notifications,
  #   :store_errors, :content_type, :content_identifier, :brand_id
  # @return [Hash] the compliance results hash from ComplianceServiceV2
  def perform(brand_id, content, content_type, options = {})
    brand = Brand.find(brand_id)

    # Initialize event broadcaster if real-time updates are enabled
    broadcaster = if options[:broadcast_events]
      Branding::Compliance::EventBroadcaster.new(
        brand_id,
        options[:session_id],
        options[:user_id]
      )
    end

    # Broadcast start event
    broadcaster&.broadcast_validation_start({
      type: content_type,
      length: content.length,
      validators: determine_validators(content_type, options)
    })

    # Perform compliance check
    service = Branding::ComplianceServiceV2.new(brand, content, content_type, options)
    results = service.check_compliance

    # Store results if requested
    if options[:store_results]
      store_compliance_results(brand, results, options)
    end

    # Broadcast completion
    broadcaster&.broadcast_validation_complete(results)

    # Send notifications if needed
    send_notifications(brand, results, options) if options[:notify]

    # Return results for job tracking
    results
  rescue StandardError => e
    # broadcaster is nil here if the failure happened before it was built.
    handle_job_error(e, broadcaster, options)
    raise # Re-raise for retry mechanism
  end

  private

  # Names of the validators expected to run for this content type. Used only
  # for the progress broadcast; the service decides what actually runs.
  def determine_validators(content_type, options)
    validators = ["Rule Engine"]
    validators << "NLP Analyzer" unless content_type.include?("visual")
    validators << "Visual Validator" if content_type.include?("visual") || content_type.include?("image")
    validators
  end

  # Persists a summary row for this check. Storage failures are logged and
  # swallowed so they never fail the job itself.
  def store_compliance_results(brand, results, options)
    ComplianceResult.create!(
      brand: brand,
      content_type: options[:content_type],
      # Hash of an identifier, not the content itself, to avoid storing raw content.
      content_hash: Digest::SHA256.hexdigest(options[:content_identifier] || ""),
      compliant: results[:compliant],
      score: results[:score],
      violations_count: results[:violations]&.count || 0,
      violations_data: results[:violations],
      suggestions_data: results[:suggestions],
      analysis_data: results[:analysis],
      metadata: {
        processing_time: results[:metadata][:processing_time],
        validators_used: results[:metadata][:validators_used],
        options: options.except(:content)
      }
    )
  rescue StandardError => e
    Rails.logger.error "Failed to store compliance results: #{e.message}"
  end

  # Emails (and optionally in-app-notifies) the right audience. Successful
  # checks are only announced when :notify_on_success is set.
  def send_notifications(brand, results, options)
    return if results[:compliant] && !options[:notify_on_success]

    # Determine notification recipients
    recipients = determine_recipients(brand, options)

    # Send appropriate notifications
    if results[:compliant]
      ComplianceMailer.compliance_passed(brand, results, recipients).deliver_later
    else
      ComplianceMailer.compliance_failed(brand, results, recipients).deliver_later
    end

    # Send in-app notifications if enabled
    if options[:in_app_notifications]
      create_in_app_notifications(brand, results, recipients)
    end
  end

  # Builds the de-duplicated recipient list from the owner / explicit user
  # ids / permitted team members, per the options flags.
  def determine_recipients(brand, options)
    recipients = []

    # Brand owner
    recipients << brand.user if options[:notify_owner]

    # Specified users
    if options[:notify_users]
      recipients.concat(User.where(id: options[:notify_users]))
    end

    # Team members with appropriate permissions
    if options[:notify_team]
      recipients.concat(brand.team_members.with_permission(:view_compliance))
    end

    recipients.uniq
  end

  # One Notification row per recipient summarizing the check outcome.
  def create_in_app_notifications(brand, results, recipients)
    recipients.each do |recipient|
      Notification.create!(
        user: recipient,
        notifiable: brand,
        action: results[:compliant] ? "compliance_passed" : "compliance_failed",
        data: {
          score: results[:score],
          violations_count: results[:violations]&.count || 0,
          summary: results[:summary]
        }
      )
    end
  end

  # Logs, broadcasts, and optionally persists a job failure. Called from the
  # perform rescue just before the error is re-raised for retry.
  def handle_job_error(error, broadcaster, options)
    Rails.logger.error "Compliance job error: #{error.message}"
    Rails.logger.error error.backtrace.join("\n")

    # Broadcast error event
    broadcaster&.broadcast_error({
      type: error.class.name,
      message: error.message,
      recoverable: !error.is_a?(ActiveRecord::RecordNotFound)
    })

    # Store error information if requested
    if options[:store_errors]
      ComplianceError.create!(
        brand_id: options[:brand_id],
        error_type: error.class.name,
        error_message: error.message,
        error_backtrace: error.backtrace,
        job_params: options
      )
    end
  end
end
-
module Branding
  module Compliance
    # Preloads a brand's compliance caches so the first real check is fast.
    class CacheWarmerJob < ApplicationJob
      queue_as :low

      def perform(brand_id)
        CacheService.preload_brand_cache(Brand.find(brand_id))
      end
    end
  end
end
-
# Periodically pre-generates journey suggestions for recently active
# journeys so interactive requests hit a warm cache.
class JourneySuggestionsCacheWarmupJob < ApplicationJob
  queue_as :low_priority

  # Provider/filter combinations covering the most common UI requests.
  COMMON_PROVIDERS = %i[openai anthropic].freeze
  COMMON_FILTERS = [
    {},
    { stage: 'awareness' },
    { stage: 'conversion' },
    { content_type: 'email' }
  ].freeze

  def perform
    return unless cache_warming_enabled?

    Rails.logger.info "Starting journey suggestions cache warmup"

    # Published journeys with execution activity in the last week.
    recently_active = Journey.published
      .joins(:journey_executions)
      .where('journey_executions.updated_at > ?', 7.days.ago)
      .distinct
      .limit(batch_size)

    recently_active.find_each { |journey| warm_journey_cache(journey) }

    Rails.logger.info "Completed journey suggestions cache warmup for #{recently_active.count} journeys"
  end

  private

  def warmup_config
    Rails.application.config.journey_suggestions[:cache_warming]
  end

  def cache_warming_enabled?
    warmup_config[:enabled]
  end

  def batch_size
    warmup_config[:batch_size]
  end

  # Populates the suggestion cache for one journey across the common
  # provider/filter matrix; individual failures are logged and skipped.
  def warm_journey_cache(journey)
    return unless journey.user

    COMMON_PROVIDERS.each do |provider|
      COMMON_FILTERS.each do |filters|
        engine = JourneySuggestionEngine.new(
          journey: journey,
          user: journey.user,
          provider: provider
        )

        # Generate suggestions to populate cache
        engine.generate_suggestions(filters)

        sleep(0.1) # Rate limiting
      rescue => e
        Rails.logger.warn "Cache warmup failed for journey #{journey.id} with provider #{provider}: #{e.message}"
      end
    end
  end
end
-
# Fires when an Activity is flagged suspicious: alerts admins, logs to the
# security channel, and may temporarily lock the offending account.
class SuspiciousActivityAlertJob < ApplicationJob
  queue_as :critical

  # Reasons severe enough to consider locking the account outright.
  CRITICAL_REASONS = %w[failed_login_attempts ip_hopping excessive_errors].freeze

  def perform(activity_id, reasons)
    activity = Activity.find(activity_id)

    # Send email to admins
    AdminMailer.suspicious_activity_alert(activity, reasons).deliver_later

    # Log to security monitoring system
    log_to_security_monitoring(activity, reasons)

    # Check if user should be temporarily locked
    check_user_lockout(activity.user, reasons)
  rescue ActiveRecord::RecordNotFound
    Rails.logger.error "Activity #{activity_id} not found for suspicious activity alert"
  end

  private

  def log_to_security_monitoring(activity, reasons)
    Rails.logger.warn <<~LOG
      [SECURITY] Suspicious Activity Detected:
      User: #{activity.user.email_address} (ID: #{activity.user.id})
      IP: #{activity.ip_address}
      Action: #{activity.full_action}
      Path: #{activity.request_path}
      Reasons: #{reasons.join(", ")}
      Time: #{activity.occurred_at}
      User Agent: #{activity.user_agent}
    LOG
  end

  # Locks the user when a critical reason coincides with repeated suspicious
  # activity (3+ flagged events in the last hour).
  def check_user_lockout(user, reasons)
    return if (reasons & CRITICAL_REASONS).none?

    recent_suspicious_count = user.activities
      .suspicious
      .where("occurred_at > ?", 1.hour.ago)
      .count

    lock_user_temporarily(user) if recent_suspicious_count >= 3
  end

  def lock_user_temporarily(user)
    user.update!(
      locked_at: Time.current,
      lock_reason: "Suspicious activity detected"
    )

    # Send notification to user
    UserMailer.account_temporarily_locked(user).deliver_later
  end
end
-
# Mailer for administrator-facing notifications: security alerts, activity
# reports, maintenance summaries, and account/system health alerts.
class AdminMailer < ApplicationMailer
  # Expose the admin URL builder to mailer view templates.
  helper_method :rails_admin_url_for

  # Alerts all admins that suspicious activity was detected for a user.
  # @param activity [Activity] the flagged activity
  # @param reasons [Array<String>] why it was flagged
  def suspicious_activity_alert(activity, reasons)
    @activity = activity
    @reasons = reasons
    @user = activity.user

    mail(
      to: admin_recipients,
      subject: "[SECURITY ALERT] Suspicious activity detected for #{@user.email_address}"
    )
  end

  # Sends yesterday's activity report to a single admin.
  def daily_activity_report(admin, report)
    @admin = admin
    @report = report
    @date = Date.current - 1.day

    mail(
      to: admin.email_address,
      subject: "Daily Activity Report - #{@date.strftime('%B %d, %Y')}"
    )
  end

  # Notifies all admins of the outcome of an automated security scan.
  def security_scan_alert(suspicious_users)
    @suspicious_users = suspicious_users
    @scan_time = Time.current

    mail(
      to: admin_recipients,
      subject: "[SECURITY] Automated scan detected #{suspicious_users.count} suspicious users"
    )
  end

  # Sends a maintenance-run summary to one admin.
  def system_maintenance_report(admin_user, maintenance_results)
    @admin_user = admin_user
    @maintenance_results = maintenance_results
    @maintenance_time = Time.current

    mail(to: admin_user.email_address, subject: "System Maintenance Report - #{@maintenance_time.strftime('%m/%d/%Y')}")
  end

  # Alerts one admin about a user-account event.
  # @param alert_type [String] 'locked', 'suspended', 'multiple_failed_logins', or other
  def user_account_alert(admin_user, user, alert_type, details = {})
    @admin_user = admin_user
    @user = user
    @alert_type = alert_type
    @details = details
    @alert_time = Time.current

    subject = case alert_type
    when 'locked'
      "User Account Locked - #{user.email_address}"
    when 'suspended'
      "User Account Suspended - #{user.email_address}"
    when 'multiple_failed_logins'
      "Multiple Failed Login Attempts - #{user.email_address}"
    else
      "User Account Alert - #{user.email_address}"
    end

    mail(to: admin_user.email_address, subject: subject)
  end

  # Alerts one admin about overall system health.
  # @param health_status [String] 'critical', 'warning', or other
  def system_health_alert(admin_user, health_status, metrics)
    @admin_user = admin_user
    @health_status = health_status
    @metrics = metrics
    @alert_time = Time.current

    subject = case health_status
    when 'critical'
      "🚨 CRITICAL System Health Alert"
    when 'warning'
      "⚠️ System Health Warning"
    else
      "System Health Status Update"
    end

    mail(to: admin_user.email_address, subject: subject)
  end

  # Sends last week's summary report to one admin.
  def weekly_summary_report(admin_user, summary_data)
    @admin_user = admin_user
    @summary_data = summary_data
    @week_start = 1.week.ago.beginning_of_week
    @week_end = Date.current.end_of_week

    mail(to: admin_user.email_address, subject: "Weekly Summary Report - #{@week_start.strftime('%m/%d')} to #{@week_end.strftime('%m/%d/%Y')}")
  end

  private

  # Email addresses of every admin user. Extracted: this query was
  # duplicated in suspicious_activity_alert and security_scan_alert.
  def admin_recipients
    User.where(role: :admin).pluck(:email_address)
  end

  # Builds a direct admin-panel URL for the given record, using the
  # mailer's configured default host/protocol.
  def rails_admin_url_for(object, action = :show)
    host = Rails.application.config.action_mailer.default_url_options[:host] || 'localhost:3000'
    protocol = Rails.application.config.action_mailer.default_url_options[:protocol] || 'http'
    model_name = object.class.name.underscore
    "#{protocol}://#{host}/admin/#{model_name}/#{object.id}"
  end
end
# Base mailer: shared default sender address and layout for all mailers.
class ApplicationMailer < ActionMailer::Base
  default from: "from@example.com"

  layout "mailer"
end
# Sends password-reset instructions to a user.
class PasswordsMailer < ApplicationMailer
  # @param user [User] the account requesting a password reset
  def reset(user)
    @user = user
    mail(to: user.email_address, subject: "Reset your password")
  end
end
# User-facing account notifications.
class UserMailer < ApplicationMailer
  # Tells the user their account was temporarily locked and when it
  # is expected to unlock (one hour from send time).
  def account_temporarily_locked(user)
    @user = user
    @unlock_time = 1.hour.from_now

    mail(to: @user.email_address, subject: "Your account has been temporarily locked")
  end
end
# An A/B test attached to a campaign. Traffic is split between journeys via
# AbTestVariant records; the test manages its status lifecycle
# (draft -> running -> paused/completed/cancelled), statistical-significance
# evaluation, winner selection, and reporting.
class AbTest < ApplicationRecord
  belongs_to :campaign
  belongs_to :user
  has_many :ab_test_variants, dependent: :destroy
  has_many :journeys, through: :ab_test_variants
  belongs_to :winner_variant, class_name: 'AbTestVariant', optional: true

  STATUSES = %w[draft running paused completed cancelled].freeze
  TEST_TYPES = %w[
    conversion engagement retention click_through
    bounce_rate time_on_page form_completion
    email_open email_click purchase revenue
  ].freeze

  validates :name, presence: true, uniqueness: { scope: :campaign_id }
  validates :status, inclusion: { in: STATUSES }
  validates :test_type, inclusion: { in: TEST_TYPES }
  validates :confidence_level, presence: true, numericality: {
    greater_than: 50, less_than_or_equal_to: 99.9
  }
  validates :significance_threshold, presence: true, numericality: {
    greater_than: 0, less_than_or_equal_to: 20
  }

  validate :end_date_after_start_date
  validate :variants_traffic_percentage_sum

  # Use settings JSON for additional attributes
  store_accessor :settings, :minimum_sample_size

  scope :active, -> { where(status: ['running', 'paused']) }
  scope :completed, -> { where(status: 'completed') }
  scope :by_type, ->(type) { where(test_type: type) }
  scope :recent, -> { order(created_at: :desc) }
  scope :running, -> { where(status: 'running') }

  # Transitions draft -> running, stamps start_date, and resets all variant
  # metrics. Returns false when preconditions are not met (see can_start?).
  def start!
    return false unless can_start?

    update!(status: 'running', start_date: Time.current)

    # Start tracking for all variants from a clean slate
    ab_test_variants.each(&:reset_metrics!)

    true
  end

  def pause!
    update!(status: 'paused')
  end

  # Resumes a paused test; returns false from any other status.
  def resume!
    return false unless paused?

    update!(status: 'running')
  end

  # Ends a running test, determining the winner first. Returns false
  # unless the test is currently running.
  def complete!
    return false unless running?

    determine_winner!
    update!(status: 'completed', end_date: Time.current)
  end

  def cancel!
    update!(status: 'cancelled', end_date: Time.current)
  end

  def running?
    status == 'running'
  end

  def paused?
    status == 'paused'
  end

  def completed?
    status == 'completed'
  end

  def draft?
    status == 'draft'
  end

  # A test may start only from draft, with at least two variants whose
  # traffic percentages sum to ~100%.
  def can_start?
    draft? && ab_test_variants.count >= 2 && valid_traffic_allocation?
  end

  # Days elapsed from start_date until end_date (or now), rounded to 0.1.
  def duration_days
    return 0 unless start_date

    end_time = end_date || Time.current
    ((end_time - start_date) / 1.day).round(1)
  end

  # Percentage (0-100) of the planned duration elapsed so far.
  def progress_percentage
    return 0 unless start_date && end_date

    # Calculate how much time has elapsed vs planned duration
    elapsed_time = Time.current - start_date
    planned_time = end_date - start_date

    return 100 if elapsed_time >= planned_time

    elapsed_days = elapsed_time / 1.day
    planned_days = planned_time / 1.day

    [(elapsed_days / planned_days * 100).round, 100].min
  end

  def planned_duration_days
    return 0 unless start_date && end_date

    ((end_date - start_date) / 1.day).round(1)
  end

  # True when at least one treatment variant beats the control with
  # significance >= significance_threshold.
  def statistical_significance_reached?
    return false unless running? || completed?

    control_variant = ab_test_variants.find_by(is_control: true)
    return false unless control_variant

    treatment_variants = ab_test_variants.where(is_control: false)

    treatment_variants.any? do |variant|
      calculate_statistical_significance_between(control_variant, variant) >= significance_threshold
    end
  end

  # Stores the statistically significant variant with the highest conversion
  # rate as winner_variant (control is always eligible). No-op when there is
  # no control, fewer than two variants, or no significant variant.
  def determine_winner!
    return if ab_test_variants.count < 2

    control_variant = ab_test_variants.find_by(is_control: true)
    return unless control_variant

    significant_variants = ab_test_variants.select do |variant|
      next true if variant.is_control? # Control is always included

      calculate_statistical_significance_between(control_variant, variant) >= significance_threshold
    end

    return if significant_variants.empty?

    winner = significant_variants.max_by(&:conversion_rate)
    update!(winner_variant: winner) if winner
  end

  def winner_declared?
    winner_variant.present?
  end

  # Deterministically assigns a visitor to a variant via consistent hashing,
  # so the same visitor always lands in the same bucket, and records the
  # visit. Returns the assigned variant, or nil when the test is not running.
  def assign_visitor(visitor_id)
    # BUGFIX: visitors arrive while the test is RUNNING. The previous guard
    # used can_start? (which requires draft status), so assignment always
    # returned nil once the test had started.
    return nil unless running?

    # Use consistent hashing to assign visitors to variants
    hash_value = Digest::MD5.hexdigest("#{id}-#{visitor_id}").to_i(16)
    percentage = hash_value % 100

    cumulative_percentage = 0
    ab_test_variants.order(:id).each do |variant|
      cumulative_percentage += variant.traffic_percentage
      if percentage < cumulative_percentage
        variant.record_visitor!
        return variant
      end
    end

    # Fallback to last variant if rounding errors occur.
    # BUGFIX: also record the visit on the fallback path so fallback
    # assignments are counted like any other.
    fallback_variant = ab_test_variants.last
    fallback_variant&.record_visitor!
    fallback_variant
  end

  # Snapshot of the test's configuration, progress, and per-variant metrics.
  def performance_report
    {
      test_name: name,
      status: status,
      start_date: start_date,
      end_date: end_date,
      progress_percentage: progress_percentage,
      variants: ab_test_variants.map(&:detailed_metrics),
      winner: winner_variant&.name,
      statistical_significance_reached: statistical_significance_reached?
    }
  end

  # Builds human-readable insights plus summaries and next steps.
  # Returns a hash with :performance_summary, :statistical_summary,
  # :recommendations, and :next_steps.
  def generate_insights
    insights_array = []

    if running?
      insights_array << "Test has been running for #{((Time.current - start_date) / 1.day).round} days"
      insights_array << "#{progress_percentage}% of planned duration completed"

      if statistical_significance_reached?
        insights_array << "Statistical significance has been reached"
      else
        insights_array << "More data needed to reach statistical significance"
      end
    end

    if completed?
      if winner_variant
        insights_array << "Winner: #{winner_variant.name} with #{winner_variant.conversion_rate}% conversion rate"
        control = ab_test_variants.find_by(is_control: true)
        if control && control != winner_variant
          lift = winner_variant.lift_vs_control
          insights_array << "Lift vs control: #{lift}%"
        end
      else
        insights_array << "No clear winner could be determined"
      end
    end

    # Return hash format expected by test
    {
      performance_summary: performance_report,
      statistical_summary: calculate_statistical_summary,
      recommendations: insights_array,
      next_steps: generate_next_steps
    }
  end

  # Significance of the best-performing treatment vs the control.
  # Returns {} when either is missing; otherwise :p_value, :is_significant,
  # and :confidence_interval.
  def calculate_statistical_significance
    control = ab_test_variants.find_by(is_control: true)
    return {} unless control

    best_treatment = ab_test_variants.where(is_control: false)
      .order(conversion_rate: :desc)
      .first

    return {} unless best_treatment

    significance_value = calculate_statistical_significance_between(control, best_treatment)

    {
      p_value: (1 - significance_value / 100.0).round(4),
      is_significant: significance_value >= significance_threshold,
      confidence_interval: significance_value.round(2)
    }
  end

  # Like complete!, but guarded by can_complete? and wrapped in a
  # transaction so winner determination and the status change are atomic.
  def complete_test!
    return false unless can_complete?

    transaction do
      determine_winner!
      update!(
        status: 'completed',
        end_date: Time.current
      )
    end

    true
  end

  # True when no minimum is configured, or total visitors across all
  # variants meet the configured minimum_sample_size (stored in settings).
  def meets_minimum_sample_size?
    return true unless minimum_sample_size.present?

    total_visitors = ab_test_variants.sum(:total_visitors)
    total_visitors >= minimum_sample_size.to_i
  end

  # A running test may complete when its end date has passed, significance
  # has been reached, or the minimum sample size is met.
  def can_complete?
    running? && (
      end_date.present? && Time.current >= end_date ||
      statistical_significance_reached? ||
      meets_minimum_sample_size?
    )
  end

  # Aggregate statistics across variants for reporting.
  def calculate_statistical_summary
    {
      control_conversion_rate: ab_test_variants.control.first&.conversion_rate || 0,
      best_variant_conversion_rate: ab_test_variants.order(conversion_rate: :desc).first&.conversion_rate || 0,
      sample_size: ab_test_variants.sum(:total_visitors),
      total_conversions: ab_test_variants.sum(:conversions)
    }
  end

  # Status-appropriate checklist of recommended next actions.
  def generate_next_steps
    steps = []

    if draft?
      steps << "Configure test variants and traffic allocation"
      steps << "Set start and end dates"
      steps << "Review and launch test"
    elsif running?
      if !meets_minimum_sample_size?
        steps << "Continue running test to reach minimum sample size"
      elsif !statistical_significance_reached?
        steps << "Continue test to achieve statistical significance"
      else
        steps << "Consider ending test and declaring winner"
      end
    elsif completed?
      steps << "Implement winning variant across all traffic"
      steps << "Document learnings and insights"
      steps << "Plan follow-up tests based on results"
    end

    steps
  end

  # Full results rollup: control vs treatments, winner, and overall rates.
  # Returns {} when the test has no variants.
  def results_summary
    return {} unless ab_test_variants.any?

    control = ab_test_variants.find_by(is_control: true)
    treatments = ab_test_variants.where(is_control: false)

    {
      test_name: name,
      status: status,
      duration_days: duration_days,
      statistical_significance: statistical_significance_reached?,
      winner: winner_variant&.name,
      control_performance: control&.performance_summary,
      treatment_performances: treatments.map(&:performance_summary),
      confidence_level: confidence_level,
      total_visitors: ab_test_variants.sum(:total_visitors),
      overall_conversion_rate: calculate_overall_conversion_rate
    }
  end

  # Per-treatment comparison against the control: lift, significance,
  # confidence intervals. Returns [] without a control or < 2 variants.
  def variant_comparison
    return [] unless ab_test_variants.count >= 2

    control = ab_test_variants.find_by(is_control: true)
    return [] unless control

    treatments = ab_test_variants.where(is_control: false)

    treatments.map do |treatment|
      # BUGFIX: previously called calculate_statistical_significance with two
      # arguments, but that public method takes none — this raised
      # ArgumentError at runtime. The pairwise helper is the intended call.
      significance = calculate_statistical_significance_between(control, treatment)
      lift = calculate_lift(control, treatment)

      {
        variant_name: treatment.name,
        control_conversion_rate: control.conversion_rate,
        treatment_conversion_rate: treatment.conversion_rate,
        lift_percentage: lift,
        statistical_significance: significance,
        is_significant: significance >= significance_threshold,
        confidence_interval: calculate_confidence_interval(treatment),
        sample_size: treatment.total_visitors
      }
    end
  end

  # Plain-language recommendation based on current state and data volume.
  def recommend_action
    return 'Test not yet started' unless running? || completed?
    return 'Insufficient data' if ab_test_variants.sum(:total_visitors) < 100

    if statistical_significance_reached?
      if winner_declared?
        "Implement #{winner_variant.name} variant (statistically significant winner)"
      else
        'Continue test - significance reached but no clear winner'
      end
    else
      if duration_days > 14
        'Consider extending test duration or increasing traffic'
      else
        'Continue test - more data needed for statistical significance'
      end
    end
  end

  # Convenience factory: builds a 50/50 control-vs-treatment test between
  # two journeys on the given campaign.
  def self.create_basic_ab_test(campaign, name, control_journey, treatment_journey, test_type = 'conversion')
    test = create!(
      campaign: campaign,
      user: campaign.user,
      name: name,
      test_type: test_type,
      hypothesis: "Treatment journey will outperform control journey for #{test_type}"
    )

    # Create control variant
    test.ab_test_variants.create!(
      journey: control_journey,
      name: 'Control',
      is_control: true,
      traffic_percentage: 50.0
    )

    # Create treatment variant
    test.ab_test_variants.create!(
      journey: treatment_journey,
      name: 'Treatment',
      is_control: false,
      traffic_percentage: 50.0
    )

    test
  end

  private

  def end_date_after_start_date
    return unless start_date && end_date

    errors.add(:end_date, 'must be after start date') if end_date <= start_date
  end

  # Allows a 99-101% band to absorb floating-point rounding.
  def variants_traffic_percentage_sum
    return unless ab_test_variants.any?

    total_percentage = ab_test_variants.sum(:traffic_percentage)
    unless (99.0..101.0).cover?(total_percentage)
      errors.add(:base, 'Variant traffic percentages must sum to 100%')
    end
  end

  def valid_traffic_allocation?
    return false unless ab_test_variants.any?

    total_percentage = ab_test_variants.sum(:traffic_percentage)
    (99.0..101.0).cover?(total_percentage)
  end

  # Simplified two-proportion z-test between control and treatment,
  # mapped to a 0-99.9 "significance percentage". Returns 0 when either
  # side has no visitors or the standard error is zero.
  def calculate_statistical_significance_between(control, treatment)
    return 0 if control.total_visitors == 0 || treatment.total_visitors == 0

    # Simplified z-test calculation for conversion rates
    p1 = control.conversion_rate / 100.0
    p2 = treatment.conversion_rate / 100.0
    n1 = control.total_visitors
    n2 = treatment.total_visitors

    # Pooled proportion
    p_pool = (control.conversions + treatment.conversions).to_f / (n1 + n2)

    # Standard error
    se = Math.sqrt(p_pool * (1 - p_pool) * (1.0/n1 + 1.0/n2))

    return 0 if se == 0

    # Z-score
    z = (p2 - p1).abs / se

    # Convert to significance percentage (simplified)
    significance = [(1 - Math.exp(-z * z / 2)) * 100, 99.9].min
    significance.round(1)
  end

  # Relative conversion-rate lift of treatment over control, in percent.
  def calculate_lift(control, treatment)
    return 0 if control.conversion_rate == 0

    ((treatment.conversion_rate - control.conversion_rate) / control.conversion_rate * 100).round(1)
  end

  # 95% confidence interval [lower, upper] for a variant's conversion rate,
  # clamped to 0..100.
  def calculate_confidence_interval(variant)
    return [0, 0] if variant.total_visitors == 0

    p = variant.conversion_rate / 100.0
    n = variant.total_visitors

    # 95% confidence interval for proportion
    margin_of_error = 1.96 * Math.sqrt(p * (1 - p) / n)

    lower = [(p - margin_of_error) * 100, 0].max
    upper = [(p + margin_of_error) * 100, 100].min

    [lower.round(1), upper.round(1)]
  end

  # Blended conversion rate across every variant, in percent.
  def calculate_overall_conversion_rate
    total_visitors = ab_test_variants.sum(:total_visitors)
    return 0 if total_visitors == 0

    total_conversions = ab_test_variants.sum(:conversions)
    (total_conversions.to_f / total_visitors * 100).round(2)
  end
end
# One arm of an AbTest: maps a journey to a traffic share and accumulates
# visitor/conversion counters plus derived statistics.
class AbTestVariant < ApplicationRecord
  belongs_to :ab_test
  belongs_to :journey
  has_one :campaign, through: :ab_test
  has_one :user, through: :ab_test

  VARIANT_TYPES = %w[control treatment variation].freeze

  validates :name, presence: true, uniqueness: { scope: :ab_test_id }
  validates :variant_type, inclusion: { in: VARIANT_TYPES }
  validates :traffic_percentage, presence: true, numericality: {
    greater_than: 0, less_than_or_equal_to: 100
  }
  validates :total_visitors, presence: true, numericality: { greater_than_or_equal_to: 0 }
  validates :conversions, presence: true, numericality: { greater_than_or_equal_to: 0 }
  validates :conversion_rate, presence: true, numericality: {
    greater_than_or_equal_to: 0, less_than_or_equal_to: 100
  }

  validate :conversions_not_exceed_visitors
  validate :only_one_control_per_test

  scope :control, -> { where(is_control: true) }
  scope :treatments, -> { where(is_control: false) }
  scope :by_conversion_rate, -> { order(conversion_rate: :desc) }
  scope :significant, -> { where('confidence_interval > ?', 95.0) }

  # conversion_rate is derived; keep it in sync on every save.
  before_save :calculate_conversion_rate

  def control?
    is_control
  end

  def treatment?
    !is_control
  end

  # Zeroes all counters, e.g. when the parent test (re)starts.
  def reset_metrics!
    update!(
      total_visitors: 0,
      conversions: 0,
      conversion_rate: 0.0,
      confidence_interval: 0.0
    )
  end

  def record_visitor!
    increment!(:total_visitors)
    calculate_and_update_conversion_rate
  end

  def record_conversion!
    increment!(:conversions)
    calculate_and_update_conversion_rate
  end

  # Core metrics hash used in reports.
  def performance_summary
    {
      name: name,
      variant_type: variant_type,
      is_control: is_control,
      traffic_percentage: traffic_percentage,
      total_visitors: total_visitors,
      conversions: conversions,
      conversion_rate: conversion_rate,
      confidence_interval: confidence_interval,
      journey_name: journey.name
    }
  end

  def sample_size_adequate?
    # Rule of thumb: at least 100 visitors and 10 conversions for meaningful results
    total_visitors >= 100 && conversions >= 10
  end

  # Qualitative label ('Low'..'Very High') for the statistical power
  # implied by the sample size.
  def statistical_power
    # Simplified power bucketing by sample size; real power analysis would
    # be more sophisticated. BUGFIX: previously returned Integer 0 for zero
    # visitors but a String for everything else (inconsistent return type);
    # zero visitors now falls into the 'Low' bucket like any other sub-100
    # count.
    case total_visitors
    when 0..99 then 'Low'
    when 100..499 then 'Medium'
    when 500..999 then 'High'
    else 'Very High'
    end
  end

  # Relative conversion-rate lift over the control variant, in percent.
  # Returns 0 for the control itself or when no comparison is possible.
  def lift_vs_control
    return 0 unless ab_test && ab_test.ab_test_variants.any?

    control_variant = ab_test.ab_test_variants.find_by(is_control: true)
    return 0 unless control_variant && control_variant != self
    return 0 if control_variant.conversion_rate == 0

    ((conversion_rate - control_variant.conversion_rate) / control_variant.conversion_rate * 100).round(1)
  end

  # Alias for backward compatibility
  def calculate_lift
    lift_vs_control
  end

  # Pairwise significance of this variant vs the control (0 when this is
  # the control or no control exists).
  def significance_vs_control
    return 0 unless ab_test && ab_test.ab_test_variants.any?

    control_variant = ab_test.ab_test_variants.find_by(is_control: true)
    return 0 unless control_variant && control_variant != self

    calculate_significance_against(control_variant)
  end

  # 95% confidence interval [lower, upper] for conversion_rate, in percent.
  def confidence_interval_range
    return [0, 0] if total_visitors == 0

    p = conversion_rate / 100.0
    n = total_visitors

    # Calculate 95% confidence interval
    margin_of_error = 1.96 * Math.sqrt(p * (1 - p) / n)

    lower = [(p - margin_of_error) * 100, 0].max
    upper = [(p + margin_of_error) * 100, 100].min

    [lower.round(1), upper.round(1)]
  end

  # Average daily visitor volume since the parent test started; 0 when
  # the test is not running.
  def expected_visitors_per_day
    return 0 unless ab_test.start_date && ab_test.running?

    days_running = [(Time.current - ab_test.start_date) / 1.day, 1].max
    (total_visitors / days_running).round
  end

  # Rough ETA (as a display string) to reach target significance at the
  # current traffic rate. Returns 'N/A' when no estimate is possible.
  def days_to_significance(target_significance = 95.0)
    return 'N/A' unless ab_test.running? && expected_visitors_per_day > 0

    # Simplified calculation - in practice would use power analysis
    control_variant = ab_test.ab_test_variants.find_by(is_control: true)
    return 'N/A' unless control_variant

    current_significance = significance_vs_control
    return 'Already significant' if current_significance >= target_significance

    # Estimate additional visitors needed (simplified)
    additional_visitors_needed = [500 - total_visitors, 0].max
    days_needed = (additional_visitors_needed / expected_visitors_per_day).ceil

    "~#{days_needed} days"
  end

  # Context about the underlying journey for reporting.
  def journey_performance_context
    {
      journey_name: journey.name,
      journey_status: journey.status,
      total_steps: journey.total_steps,
      completion_rate: journey_completion_rate,
      average_journey_time: average_journey_completion_time
    }
  end

  # performance_summary plus derived comparison/statistics fields.
  def detailed_metrics
    base_metrics = performance_summary

    base_metrics.merge({
      lift_vs_control: lift_vs_control,
      significance_vs_control: significance_vs_control,
      confidence_interval_range: confidence_interval_range,
      sample_size_adequate: sample_size_adequate?,
      statistical_power: statistical_power,
      expected_visitors_per_day: expected_visitors_per_day,
      days_to_significance: days_to_significance,
      journey_context: journey_performance_context
    })
  end

  # Rough per-variant sample size needed to detect desired_lift with the
  # given power/alpha (fixed z-values 1.96/0.84 correspond to the defaults).
  def calculate_required_sample_size(desired_lift = 20, power = 0.8, alpha = 0.05)
    # Simplified sample size calculation for A/B test
    # In practice, would use more sophisticated statistical methods

    baseline_rate = is_control ? (conversion_rate / 100.0) : 0.05 # Default 5% if not control
    effect_size = baseline_rate * (desired_lift / 100.0)

    # Simplified formula - actual calculation would be more complex
    estimated_sample_size = (2 * (1.96 + 0.84)**2 * baseline_rate * (1 - baseline_rate)) / (effect_size**2)

    estimated_sample_size.round
  end

  private

  def conversions_not_exceed_visitors
    return unless total_visitors && conversions

    errors.add(:conversions, 'cannot exceed total visitors') if conversions > total_visitors
  end

  def only_one_control_per_test
    return unless is_control? && ab_test

    existing_control = ab_test.ab_test_variants.where(is_control: true).where.not(id: id).exists?
    errors.add(:is_control, 'only one control variant allowed per test') if existing_control
  end

  # Derives conversion_rate (percent, 2 dp) from the raw counters.
  def calculate_conversion_rate
    self.conversion_rate = if total_visitors > 0
      (conversions.to_f / total_visitors * 100).round(2)
    else
      0.0
    end
  end

  # Recomputes and persists conversion_rate after counter increments.
  def calculate_and_update_conversion_rate
    calculate_conversion_rate
    save! if changed?
  end

  # Simplified two-proportion z-test against another variant, mapped to a
  # 0-99.9 confidence percentage. Returns 0 when either side has no
  # visitors or the standard error is zero.
  def calculate_significance_against(other_variant)
    return 0 if total_visitors == 0 || other_variant.total_visitors == 0

    # Z-test for proportions
    p1 = conversion_rate / 100.0
    p2 = other_variant.conversion_rate / 100.0
    n1 = total_visitors
    n2 = other_variant.total_visitors

    # Pooled proportion
    p_pool = (conversions + other_variant.conversions).to_f / (n1 + n2)

    # Standard error
    se = Math.sqrt(p_pool * (1 - p_pool) * (1.0/n1 + 1.0/n2))

    return 0 if se == 0

    # Z-score
    z = (p1 - p2).abs / se

    # Convert to confidence level (simplified)
    confidence = [(1 - Math.exp(-z * z / 2)) * 100, 99.9].min
    confidence.round(1)
  end

  def journey_completion_rate
    # This would integrate with actual journey execution data
    # For now, return conversion rate as a proxy
    conversion_rate
  end

  def average_journey_completion_time
    # This would integrate with actual journey execution timing data
    # For now, return a placeholder
    journey.journey_steps.sum(:duration_days)
  end
end
# Request-level audit trail: one row per tracked controller action, with
# client fingerprinting (device/browser/OS parsed from the user agent) and
# a suspicious flag for security monitoring.
class Activity < ApplicationRecord
  belongs_to :user

  # Validations
  validates :action, presence: true
  validates :controller, presence: true
  validates :occurred_at, presence: true

  # Scopes
  scope :recent, -> { order(occurred_at: :desc) }
  scope :suspicious, -> { where(suspicious: true) }
  scope :normal, -> { where(suspicious: false) }
  scope :by_user, ->(user) { where(user: user) }
  scope :by_action, ->(action) { where(action: action) }
  scope :by_controller, ->(controller) { where(controller: controller) }
  scope :today, -> { where(occurred_at: Time.current.beginning_of_day..Time.current.end_of_day) }
  scope :this_week, -> { where(occurred_at: Time.current.beginning_of_week..Time.current.end_of_week) }
  scope :this_month, -> { where(occurred_at: Time.current.beginning_of_month..Time.current.end_of_month) }
  scope :failed_requests, -> { where("response_status >= ?", 400) }
  scope :successful_requests, -> { where("response_status < ?", 400) }

  # Callbacks
  before_validation :set_occurred_at, on: :create

  # Serialize metadata
  serialize :metadata, coder: JSON

  # Records one activity row from a controller request/response pair.
  # @param metadata [Hash] free-form extras; :response_time is also copied
  #   into the dedicated column.
  def self.log_activity(user:, action:, controller:, request:, response: nil, metadata: {})
    create!(
      user: user,
      action: action,
      controller: controller,
      request_path: request.path,
      request_method: request.method,
      ip_address: request.remote_ip,
      user_agent: request.user_agent,
      session_id: request.session.id,
      referrer: request.referrer,
      response_status: response&.status,
      response_time: metadata[:response_time],
      metadata: metadata,
      device_type: parse_device_type(request.user_agent),
      browser_name: parse_browser_name(request.user_agent),
      os_name: parse_os_name(request.user_agent),
      occurred_at: Time.current
    )
  end

  # Coarse device classification from the user agent; nil when no UA.
  # NOTE(review): Android tablets (UA has "android" but no "tablet") are
  # classified as mobile — acceptable simplification, confirm if precision
  # matters.
  def self.parse_device_type(user_agent)
    return nil unless user_agent
    case user_agent
    when /tablet|ipad/i
      "tablet"
    when /mobile|android|iphone|phone/i
      "mobile"
    else
      "desktop"
    end
  end

  # Browser family from the user agent; nil when no UA.
  def self.parse_browser_name(user_agent)
    return nil unless user_agent
    # BUGFIX: order matters. Edge and Opera UAs also contain "Chrome", and
    # Chrome UAs also contain "Safari" — the old ordering checked /chrome/
    # first, so Edge and Opera were never detected. Most-specific tokens are
    # matched first; /edg/ also covers modern Edge's "Edg/" token and /opr/
    # covers modern Opera's "OPR/" token.
    case user_agent
    when /edge|edg\//i
      "Edge"
    when /opera|opr\//i
      "Opera"
    when /chrome/i
      "Chrome"
    when /safari/i
      "Safari"
    when /firefox/i
      "Firefox"
    else
      "Other"
    end
  end

  # Operating system from the user agent; nil when no UA.
  def self.parse_os_name(user_agent)
    return nil unless user_agent
    case user_agent
    when /windows/i
      "Windows"
    when /mac|darwin/i
      "macOS"
    when /android/i
      "Android"
    when /ios|iphone|ipad/i
      "iOS"
    when /linux/i
      "Linux"
    else
      "Other"
    end
  end

  # Instance methods
  def suspicious?
    suspicious
  end

  def failed?
    response_status && response_status >= 400
  end

  def successful?
    response_status && response_status < 400
  end

  # "controller#action" label for logs and reports.
  def full_action
    "#{controller}##{action}"
  end

  # response_time is stored in seconds; expose milliseconds for display.
  def duration_in_ms
    response_time ? (response_time * 1000).round(2) : nil
  end

  private

  def set_occurred_at
    self.occurred_at ||= Time.current
  end
end
# Audit trail for privileged admin actions, optionally linked to the
# record acted upon (polymorphic auditable) with a JSON diff of changes.
class AdminAuditLog < ApplicationRecord
  belongs_to :user
  belongs_to :auditable, polymorphic: true, optional: true

  validates :action, presence: true

  scope :recent, -> { order(created_at: :desc) }
  scope :by_user, ->(user) { where(user: user) }
  scope :by_action, ->(action) { where(action: action) }

  # Writes one audit row. `changes` is serialized to JSON; `request`, when
  # given, supplies the client IP and user agent.
  def self.log_action(user:, action:, auditable: nil, changes: nil, request: nil)
    create!(
      user: user,
      action: action,
      auditable: auditable,
      change_details: changes&.to_json,
      ip_address: request&.remote_ip,
      user_agent: request&.user_agent
    )
  end

  # The stored change diff as a Hash; {} when absent or unparseable.
  def parsed_changes
    return {} unless change_details.present?

    JSON.parse(change_details)
  rescue JSON::ParserError
    {}
  end
end
# Abstract base class for all application models.
class ApplicationRecord < ActiveRecord::Base
  primary_abstract_class
end
# A brand owned by a user: aggregates assets, guidelines, analyses, and a
# messaging framework, and exposes convenience readers over the latest
# analysis and the stored color/typography JSON.
class Brand < ApplicationRecord
  include Branding::Compliance::CacheInvalidation

  belongs_to :user
  has_many :brand_assets, dependent: :destroy
  has_many :brand_guidelines, dependent: :destroy
  has_one :messaging_framework, dependent: :destroy
  has_many :brand_analyses, dependent: :destroy
  has_many :journeys
  has_many :compliance_results, dependent: :destroy

  # Validations
  validates :name, presence: true, uniqueness: { scope: :user_id }
  validates :user, presence: true

  # Scopes
  scope :active, -> { where(active: true) }
  scope :by_industry, ->(industry) { where(industry: industry) }

  # Every brand gets an empty messaging framework immediately.
  after_create :create_default_messaging_framework

  # Most recently created analysis, or nil.
  def latest_analysis
    brand_analyses.order(created_at: :desc).first
  end

  # True when at least one asset has finished processing.
  def has_complete_brand_assets?
    brand_assets.where(processing_status: "completed").exists?
  end

  # Active guidelines in one category, highest priority first.
  def guidelines_by_category(category)
    brand_guidelines.active.where(category: category).order(priority: :desc)
  end

  # Readers over the color_scheme / typography JSON columns; each falls
  # back to an empty collection when the key is missing.
  def primary_colors
    color_scheme["primary"] || []
  end

  def secondary_colors
    color_scheme["secondary"] || []
  end

  def font_families
    typography["font_families"] || {}
  end

  # Voice attributes from the latest analysis; {} when none exists.
  def brand_voice_attributes
    latest_analysis&.voice_attributes || {}
  end

  private

  def create_default_messaging_framework
    MessagingFramework.create!(brand: self)
  end
end
# One AI-driven analysis run for a brand: tracks a pending -> processing ->
# completed/failed lifecycle and exposes readers over the extracted voice,
# keyword, and visual-guideline JSON.
class BrandAnalysis < ApplicationRecord
  include Branding::Compliance::CacheInvalidation

  belongs_to :brand

  # Constants
  ANALYSIS_STATUSES = %w[pending processing completed failed].freeze

  # Validations
  validates :analysis_status, inclusion: { in: ANALYSIS_STATUSES }
  validates :confidence_score, numericality: { in: 0..1 }, allow_nil: true

  # Scopes
  scope :completed, -> { where(analysis_status: "completed") }
  scope :recent, -> { order(created_at: :desc) }
  scope :high_confidence, -> { where("confidence_score >= ?", 0.8) }

  # Ensure JSON-backed attributes are never nil.
  before_validation :set_defaults

  # Status predicates
  def completed?
    analysis_status == "completed"
  end

  def processing?
    analysis_status == "processing"
  end

  def failed?
    analysis_status == "failed"
  end

  # Status transitions
  def mark_as_processing!
    update!(analysis_status: "processing")
  end

  # Marks the run complete, stamping analyzed_at and an optional confidence.
  def mark_as_completed!(confidence: nil)
    update!(
      analysis_status: "completed",
      analyzed_at: Time.current,
      confidence_score: confidence
    )
  end

  # Marks the run failed, recording the error message in analysis_notes.
  def mark_as_failed!(error_message = nil)
    update!(
      analysis_status: "failed",
      analysis_notes: error_message
    )
  end

  # Readers over the extracted JSON, each with a sensible fallback.
  def voice_formality
    voice_attributes.dig("formality", "level") || "neutral"
  end

  def voice_tone
    voice_attributes.dig("tone", "primary") || "professional"
  end

  def keywords
    # Extract keywords from analysis_data JSON
    analysis_data&.dig('keywords') || []
  end

  # The top three brand values.
  def primary_brand_values
    brand_values.first(3)
  end

  def has_visual_guidelines?
    visual_guidelines.present? && visual_guidelines.any?
  end

  def color_palette
    visual_guidelines.dig("colors") || {}
  end

  def typography_rules
    visual_guidelines.dig("typography") || {}
  end

  private

  # Initialize all JSON/array-backed attributes to empty collections.
  def set_defaults
    self.analysis_data ||= {}
    self.extracted_rules ||= {}
    self.voice_attributes ||= {}
    self.brand_values ||= []
    self.messaging_pillars ||= []
    self.visual_guidelines ||= {}
  end
end
# A file uploaded for a brand (guidelines PDF, logo, video, ...), tracked
# through an async processing pipeline via processing_status.
class BrandAsset < ApplicationRecord
  belongs_to :brand
  has_one_attached :file

  # Business-level asset categories.
  ASSET_TYPES = %w[brand_guidelines logo style_guide document image video template].freeze
  PROCESSING_STATUSES = %w[pending processing completed failed].freeze

  # Accepted MIME types grouped by broad media family.
  # NOTE(review): these keys (document/image/video/archive) do not map
  # one-to-one onto ASSET_TYPES — confirm that is intentional.
  ALLOWED_CONTENT_TYPES = {
    document: %w[
      application/pdf
      application/msword
      application/vnd.openxmlformats-officedocument.wordprocessingml.document
      text/plain
      text/rtf
    ],
    image: %w[
      image/jpeg
      image/png
      image/gif
      image/svg+xml
      image/webp
    ],
    video: %w[
      video/mp4
      video/quicktime
      video/x-msvideo
    ],
    archive: %w[
      application/zip
      application/x-zip-compressed
    ]
  }.freeze

  validates :asset_type, presence: true, inclusion: { in: ASSET_TYPES }
  validates :processing_status, inclusion: { in: PROCESSING_STATUSES }
  validates :file, presence: true

  scope :by_type, ->(type) { where(asset_type: type) }
  scope :processed, -> { where(processing_status: "completed") }
  scope :pending, -> { where(processing_status: "pending") }
  scope :failed, -> { where(processing_status: "failed") }

  # Kick off background processing once the record (and its attachment)
  # is committed; skipped in tests to avoid job side effects.
  after_create_commit :queue_processing_job, unless: -> { Rails.env.test? }

  # --- Content-type predicates (false when no file is attached) ----------

  def document?
    ALLOWED_CONTENT_TYPES[:document].include?(content_type)
  end

  def image?
    ALLOWED_CONTENT_TYPES[:image].include?(content_type)
  end

  def video?
    ALLOWED_CONTENT_TYPES[:video].include?(content_type)
  end

  def archive?
    ALLOWED_CONTENT_TYPES[:archive].include?(content_type)
  end

  # --- Processing-status predicates --------------------------------------

  def processed?
    processing_status == "completed"
  end

  def processing?
    processing_status == "processing"
  end

  def failed?
    processing_status == "failed"
  end

  # Attachment size in megabytes; 0 when nothing is attached.
  def file_size_mb
    return 0 unless file.attached?
    file.blob.byte_size.to_f / 1.megabyte
  end

  # MIME type of the attachment, or nil when nothing is attached.
  def content_type
    return nil unless file.attached?
    file.content_type
  end

  # --- Status transitions -------------------------------------------------

  def mark_as_processing!
    update!(processing_status: "processing")
  end

  def mark_as_completed!
    update!(
      processing_status: "completed",
      processed_at: Time.current
    )
  end

  # Records a processing failure, stashing the error in metadata.
  # FIX: guard against a nil metadata column — previously `metadata.merge`
  # raised NoMethodError when metadata had never been initialized, so the
  # failure itself could not be recorded.
  def mark_as_failed!(error_message = nil)
    update!(
      processing_status: "failed",
      metadata: (metadata || {}).merge(error: error_message)
    )
  end

  private

  # Enqueue the async pipeline that extracts content from the upload.
  def queue_processing_job
    BrandAssetProcessingJob.perform_later(self)
  end
end
-
2
# A single brand rule ("do X", "avoid Y", ...) with a category and priority,
# consumed by the compliance checking pipeline.
class BrandGuideline < ApplicationRecord
  include Branding::Compliance::CacheInvalidation

  belongs_to :brand

  # Rule strength/direction keywords and the content areas they apply to.
  RULE_TYPES = %w[do dont must should avoid prefer].freeze
  CATEGORIES = %w[voice tone visual messaging grammar style accessibility].freeze

  validates :rule_type, presence: true, inclusion: { in: RULE_TYPES }
  validates :rule_content, presence: true
  validates :category, inclusion: { in: CATEGORIES }, allow_nil: true
  validates :priority, numericality: { greater_than_or_equal_to: 0 }

  scope :active, -> { where(active: true) }
  scope :by_category, ->(category) { where(category: category) }
  scope :by_type, ->(type) { where(rule_type: type) }
  scope :high_priority, -> { where("priority >= ?", 7) }
  scope :ordered, -> { order(priority: :desc, created_at: :asc) }

  # --- Rule classification predicates -------------------------------------

  # True for rules phrased as something the brand SHOULD do.
  def positive_rule?
    case rule_type
    when "do", "must", "should", "prefer" then true
    else false
    end
  end

  # True for rules phrased as something the brand should NOT do.
  def negative_rule?
    case rule_type
    when "dont", "avoid" then true
    else false
    end
  end

  # Hard requirements that must never be violated.
  def mandatory?
    case rule_type
    when "must", "dont" then true
    else false
    end
  end

  # Softer guidance rather than a hard requirement.
  def suggestion?
    case rule_type
    when "should", "prefer", "avoid" then true
    else false
    end
  end

  # Flip the active flag, persisting immediately.
  def toggle_active!
    update!(active: !active)
  end

  # --- Class-level accessors ----------------------------------------------

  # Rules grouped by numeric priority, highest priority first.
  def self.by_priority
    ordered.group_by(&:priority)
  end

  # Active hard requirements only.
  def self.mandatory_rules
    active.where(rule_type: %w[must dont])
  end

  # Active soft guidance only.
  def self.suggestions
    active.where(rule_type: %w[should prefer avoid])
  end
end
-
2
# A marketing campaign owned by a user and targeted at a persona; groups
# journeys and exposes aggregated performance metrics across them.
class Campaign < ApplicationRecord
  belongs_to :user
  belongs_to :persona
  has_many :journeys, dependent: :destroy
  has_many :journey_analytics, through: :journeys, class_name: 'JourneyAnalytics'
  has_many :campaign_analytics, dependent: :destroy
  has_many :ab_tests, dependent: :destroy

  STATUSES = %w[draft active paused completed archived].freeze
  CAMPAIGN_TYPES = %w[
    product_launch brand_awareness lead_generation customer_retention
    seasonal_promotion content_marketing email_nurture social_media
    event_promotion customer_onboarding re_engagement cross_sell
    upsell referral awareness consideration conversion advocacy
  ].freeze

  validates :name, presence: true, uniqueness: { scope: :user_id }
  validates :status, inclusion: { in: STATUSES }
  validates :campaign_type, inclusion: { in: CAMPAIGN_TYPES }, allow_blank: true
  validates :persona, presence: true

  scope :active, -> { where(status: 'active') }
  scope :draft, -> { where(status: 'draft') }
  scope :completed, -> { where(status: 'completed') }
  scope :by_type, ->(type) { where(campaign_type: type) if type.present? }
  scope :for_persona, ->(persona_id) { where(persona_id: persona_id) if persona_id.present? }
  scope :running, -> { where(status: ['active', 'paused']) }

  # --- Status transitions -------------------------------------------------

  def activate!
    update!(status: 'active', started_at: Time.current)
  end

  def pause!
    update!(status: 'paused')
  end

  def complete!
    update!(status: 'completed', ended_at: Time.current)
  end

  def archive!
    update!(status: 'archived')
  end

  # --- Status predicates --------------------------------------------------

  def active?
    status == 'active'
  end

  # A campaign counts as running while active or merely paused.
  def running?
    %w[active paused].include?(status)
  end

  def completed?
    status == 'completed'
  end

  # Whole days between start and end (or now); 0 before the campaign starts.
  def duration_days
    return 0 unless started_at

    end_date = ended_at || Time.current
    ((end_date - started_at) / 1.day).round
  end

  def total_journeys
    journeys.count
  end

  # Count of published journeys in this campaign.
  def active_journeys
    journeys.published.count
  end

  # Snapshot of headline metrics; empty hash before the campaign has run.
  def performance_summary
    return {} unless running? || completed?

    {
      total_executions: journey_executions_count,
      completion_rate: completion_rate,
      average_duration: average_journey_duration,
      conversion_rate: conversion_rate,
      engagement_score: engagement_score
    }
  end

  # Total journey executions across all journeys (join row count).
  def journey_executions_count
    journeys.joins(:journey_executions).count
  end

  # Percentage of executions that reached the 'completed' state.
  def completion_rate
    total = journey_executions_count
    return 0 if total == 0

    completed = journeys.joins(:journey_executions)
                        .where(journey_executions: { status: 'completed' })
                        .count

    (completed.to_f / total * 100).round(1)
  end

  # Proxy metric: uses completion rate until real conversion goals exist.
  def conversion_rate
    completion_rate
  end

  # Heuristic 0-100 score: completion rate plus a bonus for positive feedback.
  def engagement_score
    return 0 unless journey_executions_count > 0

    base_score = completion_rate
    feedback_bonus = positive_feedback_percentage * 0.3

    [base_score + feedback_bonus, 100].min.round(1)
  end

  # Average wall-clock duration (in days) of completed journey executions.
  # FIX: the previous implementation iterated `journeys.joins(:journey_executions)`
  # without `distinct`, so a journey with N completed executions appeared N
  # times and each execution's duration was summed N times, inflating the
  # average. We now collect each completed execution exactly once.
  def average_journey_duration
    completed_executions = journeys.includes(:journey_executions).flat_map do |journey|
      journey.journey_executions.select do |execution|
        execution.status == 'completed' && execution.completed_at.present?
      end
    end

    return 0 if completed_executions.empty?

    total_duration = completed_executions.sum do |execution|
      execution.completed_at - execution.started_at
    end

    (total_duration / completed_executions.count / 1.day).round(1)
  end

  # Percentage of suggestion feedback rated 4 or 5 stars.
  def positive_feedback_percentage
    total_feedback = journeys.joins(:suggestion_feedbacks).count
    return 0 if total_feedback == 0

    positive_feedback = journeys.joins(:suggestion_feedbacks)
                                .where(suggestion_feedbacks: { rating: 4..5 })
                                .count

    (positive_feedback.to_f / total_feedback * 100).round(1)
  end

  # Persona-derived targeting context, delegated to the persona model.
  def target_audience_context
    persona.to_campaign_context
  end

  # Share of journeys that are published, as a whole-number percentage.
  def progress_percentage
    return 0 unless total_journeys > 0

    (active_journeys.to_f / total_journeys * 100).round
  end

  # Serializable summary used by analytics consumers.
  def to_analytics_context
    {
      id: id,
      name: name,
      type: campaign_type,
      persona: persona.name,
      status: status,
      duration_days: duration_days,
      performance: performance_summary,
      journeys_count: total_journeys
    }
  end
end
-
2
# The stored outcome of one brand-compliance check on a piece of content:
# an overall score, a compliant flag, and JSON arrays of violations and
# suggestions plus processing metadata.
class ComplianceResult < ApplicationRecord
  belongs_to :brand

  validates :content_type, presence: true
  validates :content_hash, presence: true
  validates :score, numericality: { greater_than_or_equal_to: 0, less_than_or_equal_to: 1 }
  validates :violations_count, numericality: { greater_than_or_equal_to: 0 }

  scope :compliant, -> { where(compliant: true) }
  scope :non_compliant, -> { where(compliant: false) }
  scope :recent, -> { order(created_at: :desc) }
  scope :by_content_type, ->(type) { where(content_type: type) }
  scope :high_score, -> { where("score >= ?", 0.9) }
  scope :low_score, -> { where("score < ?", 0.5) }

  # --- Aggregate class methods --------------------------------------------

  # Mean score across the current relation; 0.0 when there are no rows.
  def self.average_score
    average(:score) || 0.0
  end

  # Percentage of results flagged compliant, rounded to 2 decimals.
  def self.compliance_rate
    return 0.0 if count == 0
    (compliant.count.to_f / count * 100).round(2)
  end

  # The most frequent violation types across the relation, as
  # { type => count } sorted by descending count.
  # FIX: `compact` drops rows whose violations_data column is NULL —
  # previously a nil element survived `flatten` and `violation["type"]`
  # raised NoMethodError.
  def self.common_violations(limit = 10)
    all_violations = pluck(:violations_data).compact.flatten
    violation_counts = Hash.new(0)

    all_violations.each do |violation|
      key = violation["type"] || violation[:type]
      violation_counts[key] += 1 if key
    end

    violation_counts.sort_by { |_, count| -count }.first(limit).to_h
  end

  # --- Instance methods ----------------------------------------------------
  # The JSON columns may be nil on legacy rows, so each reader guards with
  # an empty-array fallback.

  # Violations marked critical or high severity.
  def high_severity_violations
    (violations_data || []).select { |v| %w[critical high].include?(v["severity"] || v[:severity]) }
  end

  # { violation_type => count } for this result.
  def violation_summary
    violations_by_type = (violations_data || []).group_by { |v| v["type"] || v[:type] }
    violations_by_type.transform_values(&:count)
  end

  # Only the high-priority suggestions.
  def suggested_actions
    (suggestions_data || []).select { |s| (s["priority"] || s[:priority]) == "high" }
  end

  # Wall-clock processing time recorded by the checker (0 when absent).
  def processing_time_seconds
    metadata&.dig("processing_time") || 0
  end

  # Names of the validators that ran for this check.
  def validators_used
    metadata&.dig("validators_used") || []
  end

  # Percentage of validators served from cache during this check.
  def cache_efficiency
    cache_hits = metadata&.dig("cache_hits") || 0
    total_validators = validators_used.length
    return 0.0 if total_validators == 0

    (cache_hits.to_f / total_validators * 100).round(2)
  end
end
-
2
module Branding
  module Compliance
    # Mixin for models whose changes must invalidate (and rewarm) the
    # cached compliance rules of their brand.
    module CacheInvalidation
      extend ActiveSupport::Concern

      included do
        after_commit :invalidate_compliance_cache, on: [:create, :update, :destroy]
      end

      private

      # Invalidate the brand's cached rules and queue a warm-up job.
      #
      # FIX: the original assigned `brand_id = case self ... then brand_id`.
      # Ruby creates the local variable `brand_id` at parse time of the
      # assignment, so the bare `brand_id` inside the BrandGuideline /
      # BrandAnalysis branch read the (nil) local instead of calling the
      # ActiveRecord attribute method — the cache was invalidated with a
      # nil id. The local is renamed so the attribute method resolves.
      def invalidate_compliance_cache
        # Skip cache invalidation in test environment to avoid job issues
        return if Rails.env.test?

        target_brand_id = case self
                          when Brand then id
                          when BrandGuideline, BrandAnalysis then brand_id
                          else return
                          end

        # Use the CacheService to invalidate rules
        Branding::Compliance::CacheService.invalidate_rules(target_brand_id)

        # Queue cache warming to rebuild cache
        Branding::Compliance::CacheWarmerJob.perform_later(target_brand_id)
      end
    end
  end
end
-
# One stage of a named conversion funnel for a journey over a time period:
# visitors in, conversions out, plus derived conversion/drop-off rates.
class ConversionFunnel < ApplicationRecord
  belongs_to :journey
  belongs_to :campaign
  belongs_to :user

  validates :funnel_name, presence: true
  validates :stage, presence: true
  validates :stage_order, presence: true, uniqueness: { scope: [:journey_id, :funnel_name, :period_start] }
  validates :visitors, presence: true, numericality: { greater_than_or_equal_to: 0 }
  validates :conversions, presence: true, numericality: { greater_than_or_equal_to: 0 }
  validates :conversion_rate, presence: true, numericality: { greater_than_or_equal_to: 0, less_than_or_equal_to: 100 }
  validates :drop_off_rate, presence: true, numericality: { greater_than_or_equal_to: 0, less_than_or_equal_to: 100 }
  validates :period_start, presence: true
  validates :period_end, presence: true

  validate :period_end_after_start
  validate :conversions_not_exceed_visitors

  # Use metadata for additional data storage
  store_accessor :metadata, :funnel_data, :total_users, :final_conversions, :overall_conversion_rate

  scope :by_funnel, ->(funnel_name) { where(funnel_name: funnel_name) }
  scope :by_stage, ->(stage) { where(stage: stage) }
  scope :ordered_by_stage, -> { order(:stage_order) }
  scope :for_period, ->(start_date, end_date) { where(period_start: start_date..end_date) }
  scope :recent, -> { order(period_start: :desc) }
  scope :high_conversion, -> { where('conversion_rate > ?', 20.0) }
  scope :high_drop_off, -> { where('drop_off_rate > ?', 50.0) }

  # Common funnel stages for marketing journeys
  AWARENESS_STAGES = %w[impression reach view].freeze
  CONSIDERATION_STAGES = %w[click engage explore read].freeze
  CONVERSION_STAGES = %w[signup purchase subscribe convert].freeze
  RETENTION_STAGES = %w[login return repeat_purchase loyalty].freeze
  ADVOCACY_STAGES = %w[share recommend review refer].freeze

  ALL_STAGES = (AWARENESS_STAGES + CONSIDERATION_STAGES +
                CONVERSION_STAGES + RETENTION_STAGES + ADVOCACY_STAGES).freeze

  # Create one funnel-stage record per journey step (in step order) for the
  # given period. Metrics are left at defaults; see calculate_funnel_metrics.
  def self.create_journey_funnel(journey, period_start, period_end, funnel_name = 'default')
    journey.journey_steps.order(:position).each_with_index do |step, index|
      create!(
        journey: journey,
        campaign: journey.campaign,
        user: journey.user,
        funnel_name: funnel_name,
        stage: step.stage,
        stage_order: index + 1,
        period_start: period_start,
        period_end: period_end
      )
    end
  end

  # Fill in visitors/conversions and derived rates for every stage of the
  # funnel, chaining each stage's visitors from the previous stage's
  # conversions. Returns the reloaded stages (or [] if none exist).
  # FIX: the two update! calls per stage are merged into one, so each stage
  # is saved once with a mutually consistent set of values.
  def self.calculate_funnel_metrics(journey_id, funnel_name, period_start, period_end)
    funnel_stages = where(journey_id: journey_id, funnel_name: funnel_name)
                    .where(period_start: period_start, period_end: period_end)
                    .ordered_by_stage

    return [] if funnel_stages.empty?

    funnel_stages.each_with_index do |stage, index|
      visitors =
        if index == 0
          # First stage - visitors are the total who entered the journey
          calculate_stage_visitors(stage)
        else
          # Subsequent stages - visitors are conversions from previous stage
          funnel_stages[index - 1].conversions
        end
      conversions = calculate_stage_conversions(stage)

      stage.update!(
        visitors: visitors,
        conversions: conversions,
        conversion_rate: visitors > 0 ? (conversions.to_f / visitors * 100).round(2) : 0,
        drop_off_rate: visitors > 0 ? ((visitors - conversions).to_f / visitors * 100).round(2) : 0
      )
    end

    funnel_stages.reload
  end

  # Summary hash for a whole funnel: totals, overall rate, best/worst
  # stages, and per-stage data. Empty hash when no stages exist.
  def self.funnel_overview(journey_id, funnel_name, period_start, period_end)
    stages = by_funnel(funnel_name)
             .where(journey_id: journey_id)
             .where(period_start: period_start, period_end: period_end)
             .ordered_by_stage

    return {} if stages.empty?

    total_visitors = stages.first.visitors
    final_conversions = stages.last.conversions
    overall_conversion_rate = total_visitors > 0 ? (final_conversions.to_f / total_visitors * 100).round(2) : 0

    {
      funnel_name: funnel_name,
      total_visitors: total_visitors,
      final_conversions: final_conversions,
      overall_conversion_rate: overall_conversion_rate,
      total_stages: stages.count,
      biggest_drop_off_stage: stages.max_by(&:drop_off_rate)&.stage,
      best_converting_stage: stages.max_by(&:conversion_rate)&.stage,
      stages: stages.map(&:to_funnel_data)
    }
  end

  # Side-by-side overview of the same funnel over two periods, with deltas.
  # Empty hash when either period has no data.
  def self.compare_funnels(journey_id, period1_start, period1_end, period2_start, period2_end, funnel_name = 'default')
    period1_data = funnel_overview(journey_id, funnel_name, period1_start, period1_end)
    period2_data = funnel_overview(journey_id, funnel_name, period2_start, period2_end)

    return {} if period1_data.empty? || period2_data.empty?

    {
      period1: period1_data,
      period2: period2_data,
      comparison: {
        visitor_change: period2_data[:total_visitors] - period1_data[:total_visitors],
        conversion_change: period2_data[:final_conversions] - period1_data[:final_conversions],
        rate_change: period2_data[:overall_conversion_rate] - period1_data[:overall_conversion_rate]
      }
    }
  end

  # Serializable per-stage snapshot.
  def to_funnel_data
    {
      stage: stage,
      stage_order: stage_order,
      visitors: visitors,
      conversions: conversions,
      conversion_rate: conversion_rate,
      drop_off_rate: drop_off_rate,
      drop_off_count: visitors - conversions
    }
  end

  # The following stage of this funnel/period, or nil at the end.
  def next_stage
    self.class.where(journey_id: journey_id, funnel_name: funnel_name, period_start: period_start)
        .where(stage_order: stage_order + 1)
        .first
  end

  # The preceding stage of this funnel/period, or nil at the start.
  def previous_stage
    self.class.where(journey_id: journey_id, funnel_name: funnel_name, period_start: period_start)
        .where(stage_order: stage_order - 1)
        .first
  end

  # Human-readable tuning hints based on simple thresholds.
  def optimization_suggestions
    suggestions = []

    if drop_off_rate > 70
      suggestions << "High drop-off rate (#{drop_off_rate}%) - consider improving #{stage} experience"
    end

    if conversion_rate < 10 && stage_order > 1
      suggestions << "Low conversion rate (#{conversion_rate}%) - optimize #{stage} messaging or incentives"
    end

    # NOTE(review): calculate_funnel_metrics sets next_stage.visitors to
    # this stage's conversions, so this branch only fires if stage data was
    # computed some other way — confirm it is still wanted.
    if next_stage && next_stage.visitors < (conversions * 0.8)
      suggestions << "Significant visitor loss between #{stage} and #{next_stage.stage} - check journey flow"
    end

    suggestions.empty? ? ["Performance looks good for #{stage} stage"] : suggestions
  end

  private

  def period_end_after_start
    return unless period_start && period_end

    errors.add(:period_end, 'must be after period start') if period_end <= period_start
  end

  def conversions_not_exceed_visitors
    return unless visitors && conversions

    errors.add(:conversions, 'cannot exceed visitors') if conversions > visitors
  end

  # NOTE: `private` above does not apply to `def self.` methods; these class
  # methods remain public, matching the original behavior.

  # Count distinct executions that reached this stage's step in the period.
  # Placeholder integration until real execution data is wired in.
  def self.calculate_stage_visitors(stage)
    journey = stage.journey

    executions_in_period = journey.journey_executions
                                  .where(created_at: stage.period_start..stage.period_end)

    stage_step = journey.journey_steps.find_by(stage: stage.stage)
    return 0 unless stage_step

    executions_in_period.joins(:step_executions)
                        .where(step_executions: { journey_step_id: stage_step.id })
                        .distinct
                        .count
  end

  # Count distinct executions that COMPLETED this stage's step in the period.
  # Placeholder integration until real execution data is wired in.
  def self.calculate_stage_conversions(stage)
    journey = stage.journey

    executions_in_period = journey.journey_executions
                                  .where(created_at: stage.period_start..stage.period_end)

    stage_step = journey.journey_steps.find_by(stage: stage.stage)
    return 0 unless stage_step

    executions_in_period.joins(:step_executions)
                        .where(step_executions: {
                          journey_step_id: stage_step.id,
                          status: 'completed'
                        })
                        .distinct
                        .count
  end

  # Per-stage breakdown without the overview totals.
  def self.funnel_step_breakdown(journey_id, funnel_name, period_start, period_end)
    stages = by_funnel(funnel_name)
             .where(journey_id: journey_id)
             .where(period_start: period_start, period_end: period_end)
             .ordered_by_stage

    stages.map do |stage|
      {
        stage: stage.stage,
        stage_order: stage.stage_order,
        visitors: stage.visitors,
        conversions: stage.conversions,
        conversion_rate: stage.conversion_rate,
        drop_off_rate: stage.drop_off_rate
      }
    end
  end

  # Basic trend hash for the funnel; could be enhanced with historical data.
  # FIX: the empty case previously returned [] while the populated case
  # returned a Hash; it now consistently returns {} (matching funnel_overview).
  def self.funnel_trends(journey_id, funnel_name, period_start, period_end)
    stages = by_funnel(funnel_name)
             .where(journey_id: journey_id)
             .where(period_start: period_start, period_end: period_end)
             .ordered_by_stage

    return {} if stages.empty?

    {
      overall_trend: "stable", # placeholder - could calculate based on historical data
      conversion_trend: stages.average(:conversion_rate).to_f.round(2),
      drop_off_trend: stages.average(:drop_off_rate).to_f.round(2),
      period: {
        start: period_start,
        end: period_end
      }
    }
  end
end
-
2
# Per-request global state; ActiveSupport resets these attributes between
# requests/jobs automatically.
class Current < ActiveSupport::CurrentAttributes
  attribute :session, :user_agent, :ip_address, :request_id, :session_id

  # Convenience: Current.user resolves through the session (nil-safe).
  delegate :user, to: :session, allow_nil: true
end
-
2
# A multi-step marketing journey. Owns steps, executions and a wide
# analytics surface (journey analytics, funnels, metrics, A/B variants,
# and brand-compliance insights).
class Journey < ApplicationRecord
  belongs_to :user
  belongs_to :campaign, optional: true
  belongs_to :brand, optional: true
  has_one :persona, through: :campaign
  has_many :journey_steps, dependent: :destroy
  has_many :step_transitions, through: :journey_steps
  has_many :journey_executions, dependent: :destroy
  has_many :suggestion_feedbacks, dependent: :destroy
  has_many :journey_insights, dependent: :destroy
  has_many :journey_analytics, class_name: 'JourneyAnalytics', dependent: :destroy
  has_many :conversion_funnels, dependent: :destroy
  has_many :journey_metrics, dependent: :destroy
  has_many :ab_test_variants, dependent: :destroy
  has_many :ab_tests, through: :ab_test_variants

  # Publication lifecycle states.
  STATUSES = %w[draft published archived].freeze
  # NOTE(review): this list differs from Campaign::CAMPAIGN_TYPES (it has
  # 'custom', Campaign's has funnel-stage names) — confirm intentional.
  CAMPAIGN_TYPES = %w[
    product_launch
    brand_awareness
    lead_generation
    customer_retention
    seasonal_promotion
    content_marketing
    email_nurture
    social_media
    event_promotion
    custom
  ].freeze

  # Canonical marketing-funnel stages used by journey steps.
  STAGES = %w[awareness consideration conversion retention advocacy].freeze

  validates :name, presence: true
  validates :status, inclusion: { in: STATUSES }
  validates :campaign_type, inclusion: { in: CAMPAIGN_TYPES }, allow_blank: true

  scope :draft, -> { where(status: 'draft') }
  scope :published, -> { where(status: 'published') }
  scope :archived, -> { where(status: 'archived') }
  # "active" here means not archived (draft OR published).
  scope :active, -> { where(status: %w[draft published]) }

  # Publish the journey, stamping published_at.
  def publish!
    update!(status: 'published', published_at: Time.current)
  end

  # Archive the journey, stamping archived_at.
  def archive!
    update!(status: 'archived', archived_at: Time.current)
  end

  def published?
    status == 'published'
  end

  # Deep-copy this journey as a fresh draft named "<name> (Copy)", copying
  # each step. NOTE(review): step transitions are not copied — confirm.
  def duplicate
    dup.tap do |new_journey|
      new_journey.name = "#{name} (Copy)"
      new_journey.status = 'draft'
      new_journey.published_at = nil
      new_journey.archived_at = nil
      new_journey.save!

      journey_steps.each do |step|
        new_step = step.dup
        new_step.journey = new_journey
        new_step.save!
      end
    end
  end

  def total_steps
    journey_steps.count
  end

  # { stage => step count } across this journey's steps.
  def steps_by_stage
    journey_steps.group(:stage).count
  end

  # Portable export of the journey and its steps (no DB ids).
  def to_json_export
    {
      name: name,
      description: description,
      campaign_type: campaign_type,
      target_audience: target_audience,
      goals: goals,
      metadata: metadata,
      settings: settings,
      steps: journey_steps.includes(:transitions_from, :transitions_to).map(&:to_json_export)
    }
  end

  # Most recent analytics row for this journey.
  # NOTE(review): the `period` parameter is accepted but never used in the
  # query — callers always get the latest row regardless. Confirm whether
  # filtering by period was intended.
  def current_analytics(period = 'daily')
    journey_analytics.order(period_start: :desc).first
  end

  # Aggregate execution/conversion/engagement numbers over the last `days`
  # days; {} when no analytics rows exist in the window.
  def analytics_summary(days = 30)
    start_date = days.days.ago
    end_date = Time.current

    analytics = journey_analytics.where(period_start: start_date..end_date)

    return {} if analytics.empty?

    {
      total_executions: analytics.sum(:total_executions),
      completed_executions: analytics.sum(:completed_executions),
      abandoned_executions: analytics.sum(:abandoned_executions),
      average_conversion_rate: analytics.average(:conversion_rate)&.round(2) || 0,
      average_engagement_score: analytics.average(:engagement_score)&.round(2) || 0,
      period_days: days
    }
  end

  # Funnel overview for the last `days` days, delegated to ConversionFunnel.
  def funnel_performance(funnel_name = 'default', days = 7)
    start_date = days.days.ago
    end_date = Time.current

    ConversionFunnel.funnel_overview(id, funnel_name, start_date, end_date)
  end

  # Metric-by-metric comparison against another journey.
  def compare_with_journey(other_journey_id, metrics = JourneyMetric::CORE_METRICS)
    JourneyMetric.compare_journey_metrics(id, other_journey_id, metrics)
  end

  # Trend directions over the last `periods` analytics rows.
  def performance_trends(periods = 7)
    JourneyAnalytics.calculate_trends(id, periods)
  end

  # True when this journey participates in any A/B test variant.
  def is_ab_test_variant?
    ab_test_variants.any?
  end

  # A/B test participation details.
  # NOTE(review): returns a String sentinel ('not_in_test', 'no_active_test',
  # 'unknown_variant') OR a Hash — callers must handle both shapes.
  def ab_test_status
    return 'not_in_test' unless is_ab_test_variant?

    test = ab_tests.active.first
    return 'no_active_test' unless test

    variant = ab_test_variants.joins(:ab_test).where(ab_tests: { id: test.id }).first
    return 'unknown_variant' unless variant

    {
      test_name: test.name,
      variant_name: variant.name,
      is_control: variant.is_control?,
      test_status: test.status,
      traffic_percentage: variant.traffic_percentage
    }
  end

  # Persona targeting context via the campaign; {} when unavailable.
  def persona_context
    return {} unless campaign&.persona

    campaign.persona.to_campaign_context
  end

  # Owning campaign's analytics context; {} when no campaign.
  def campaign_context
    return {} unless campaign

    campaign.to_analytics_context
  end

  # Compute and persist metrics for the given period label.
  def calculate_metrics!(period = 'daily')
    JourneyMetric.calculate_and_store_metrics(self, period)
  end

  # Create funnel stage records for the window, then fill in their metrics.
  def create_conversion_funnel!(period_start = 1.week.ago, period_end = Time.current, funnel_name = 'default')
    ConversionFunnel.create_journey_funnel(self, period_start, period_end, funnel_name)
    ConversionFunnel.calculate_funnel_metrics(id, funnel_name, period_start, period_end)
  end

  # Weighted composite score (conversion 40%, engagement 30%, completion
  # 30%) from the latest analytics row; 0 when no analytics exist.
  def latest_performance_score
    latest_analytics = current_analytics
    return 0 unless latest_analytics

    # Weighted performance score
    conversion_weight = 0.4
    engagement_weight = 0.3
    completion_weight = 0.3

    (latest_analytics.conversion_rate * conversion_weight +
     latest_analytics.engagement_score * engagement_weight +
     (latest_analytics.completed_executions.to_f / [latest_analytics.total_executions, 1].max * 100) * completion_weight).round(1)
  end

  # --- Brand compliance analytics (all no-op unless a brand is attached) --

  def brand_compliance_summary(days = 30)
    return {} unless brand_id.present?

    JourneyInsight.brand_compliance_summary(id, days)
  end

  def brand_compliance_by_step(days = 30)
    return {} unless brand_id.present?

    JourneyInsight.brand_compliance_by_step(id, days)
  end

  def brand_violations_breakdown(days = 30)
    return {} unless brand_id.present?

    JourneyInsight.brand_violations_breakdown(id, days)
  end

  # Most recent compliance score; defaults to a perfect 1.0 when there is
  # no brand or no insight data yet.
  def latest_brand_compliance_score
    return 1.0 unless brand_id.present?

    latest_compliance = journey_insights
                        .brand_compliance
                        .order(calculated_at: :desc)
                        .first

    latest_compliance&.data&.dig('score') || 1.0
  end

  # 'improving'/'declining'/'stable'-style label over the last `days` days;
  # needs at least 3 data points, otherwise 'stable'.
  def brand_compliance_trend(days = 30)
    return 'stable' unless brand_id.present?

    compliance_insights = journey_insights
                          .brand_compliance
                          .where('calculated_at >= ?', days.days.ago)
                          .order(calculated_at: :desc)

    return 'stable' if compliance_insights.count < 3

    scores = compliance_insights.map { |insight| insight.data['score'] }.compact
    JourneyInsight.calculate_score_trend(scores)
  end

  # 0.0-1.0 brand health: 60% average score + 40% compliance rate, minus a
  # capped penalty of 0.05 per violation (max 0.5); clamped at 0.
  def overall_brand_health_score
    return 1.0 unless brand_id.present?

    compliance_summary = brand_compliance_summary(30)
    return 1.0 if compliance_summary.empty?

    # Calculate overall brand health based on multiple factors
    compliance_score = compliance_summary[:average_score] || 1.0
    compliance_rate = (compliance_summary[:compliance_rate] || 100) / 100.0
    violation_penalty = [compliance_summary[:total_violations] * 0.05, 0.5].min

    # Weighted brand health score
    health_score = (compliance_score * 0.6) + (compliance_rate * 0.4) - violation_penalty
    [health_score, 0.0].max.round(3)
  end

  # Threshold-based alerts over the last 7 days of compliance data.
  # Each alert is { type:, severity:, message:, recommendation: }.
  def brand_compliance_alerts
    return [] unless brand_id.present?

    alerts = []
    summary = brand_compliance_summary(7) # Last 7 days

    if summary.present?
      # Alert for low average score
      if summary[:average_score] < 0.7
        alerts << {
          type: 'low_compliance_score',
          severity: 'high',
          message: "Average brand compliance score is #{(summary[:average_score] * 100).round(1)}%",
          recommendation: 'Review content against brand guidelines'
        }
      end

      # Alert for declining trend
      if brand_compliance_trend(7) == 'declining'
        alerts << {
          type: 'declining_compliance',
          severity: 'medium',
          message: 'Brand compliance trend is declining',
          recommendation: 'Investigate recent content changes'
        }
      end

      # Alert for high violation count
      if summary[:total_violations] > 10
        alerts << {
          type: 'high_violations',
          severity: 'medium',
          message: "#{summary[:total_violations]} brand violations in the last 7 days",
          recommendation: 'Review and fix flagged content'
        }
      end
    end

    alerts
  end
end
-
class JourneyAnalytics < ApplicationRecord
-
belongs_to :journey
-
belongs_to :campaign
-
belongs_to :user
-
-
validates :period_start, presence: true
-
validates :period_end, presence: true
-
validates :total_executions, presence: true, numericality: { greater_than_or_equal_to: 0 }
-
validates :completed_executions, presence: true, numericality: { greater_than_or_equal_to: 0 }
-
validates :abandoned_executions, presence: true, numericality: { greater_than_or_equal_to: 0 }
-
validates :conversion_rate, presence: true, numericality: { greater_than_or_equal_to: 0, less_than_or_equal_to: 100 }
-
validates :engagement_score, presence: true, numericality: { greater_than_or_equal_to: 0, less_than_or_equal_to: 100 }
-
-
validate :period_end_after_start
-
validate :executions_consistency
-
-
scope :for_period, ->(start_date, end_date) { where(period_start: start_date..end_date) }
-
scope :recent, -> { order(period_start: :desc) }
-
scope :high_conversion, -> { where('conversion_rate > ?', 10.0) }
-
scope :low_engagement, -> { where('engagement_score < ?', 50.0) }
-
-
# Time period scopes
-
scope :daily, -> { where('julianday(period_end) - julianday(period_start) <= ?', 1.0) }
-
scope :weekly, -> { where('julianday(period_end) - julianday(period_start) <= ?', 7.0) }
-
scope :monthly, -> { where('julianday(period_end) - julianday(period_start) <= ?', 30.0) }
-
-
def period_duration_days
-
((period_end - period_start) / 1.day).round(1)
-
end
-
-
def completion_rate
-
return 0.0 if total_executions == 0
-
(completed_executions.to_f / total_executions * 100).round(2)
-
end
-
-
def abandonment_rate
-
return 0.0 if total_executions == 0
-
(abandoned_executions.to_f / total_executions * 100).round(2)
-
end
-
-
def average_completion_time_formatted
-
return 'N/A' if average_completion_time == 0
-
-
hours = (average_completion_time / 1.hour).to_i
-
minutes = ((average_completion_time % 1.hour) / 1.minute).to_i
-
-
if hours > 0
-
"#{hours}h #{minutes}m"
-
else
-
"#{minutes}m"
-
end
-
end
-
-
def performance_grade
-
score = (conversion_rate + engagement_score) / 2
-
-
case score
-
when 80..100 then 'A'
-
when 65..79 then 'B'
-
when 50..64 then 'C'
-
when 35..49 then 'D'
-
else 'F'
-
end
-
end
-
-
def self.aggregate_for_period(journey_id, start_date, end_date)
-
analytics = where(journey_id: journey_id)
-
.where(period_start: start_date..end_date)
-
-
return nil if analytics.empty?
-
-
{
-
total_executions: analytics.sum(:total_executions),
-
completed_executions: analytics.sum(:completed_executions),
-
abandoned_executions: analytics.sum(:abandoned_executions),
-
average_conversion_rate: analytics.average(:conversion_rate)&.round(2) || 0,
-
average_engagement_score: analytics.average(:engagement_score)&.round(2) || 0,
-
total_period_days: ((end_date - start_date) / 1.day).round,
-
data_points: analytics.count
-
}
-
end
-
-
def self.calculate_trends(journey_id, periods = 4)
-
recent_analytics = where(journey_id: journey_id)
-
.order(period_start: :desc)
-
.limit(periods)
-
-
return {} if recent_analytics.count < 2
-
-
conversion_trend = calculate_trend(recent_analytics.pluck(:conversion_rate))
-
engagement_trend = calculate_trend(recent_analytics.pluck(:engagement_score))
-
execution_trend = calculate_trend(recent_analytics.pluck(:total_executions))
-
-
{
-
conversion_rate: {
-
trend: conversion_trend[:direction],
-
change_percentage: conversion_trend[:change_percentage]
-
},
-
engagement_score: {
-
trend: engagement_trend[:direction],
-
change_percentage: engagement_trend[:change_percentage]
-
},
-
total_executions: {
-
trend: execution_trend[:direction],
-
change_percentage: execution_trend[:change_percentage]
-
}
-
}
-
end
-
-
def compare_with_previous_period
-
previous_analytics = self.class.where(journey_id: journey_id)
-
.where('period_end <= ?', period_start)
-
.order(period_end: :desc)
-
.first
-
-
return nil unless previous_analytics
-
-
{
-
conversion_rate_change: conversion_rate - previous_analytics.conversion_rate,
-
engagement_score_change: engagement_score - previous_analytics.engagement_score,
-
execution_change: total_executions - previous_analytics.total_executions,
-
completion_rate_change: completion_rate - previous_analytics.completion_rate
-
}
-
end
-
-
# Flattens this analytics row into a single data point for charting.
def to_chart_data
  point = { period: period_start.strftime('%Y-%m-%d') }
  point[:conversion_rate] = conversion_rate
  point[:engagement_score] = engagement_score
  point[:total_executions] = total_executions
  point[:completion_rate] = completion_rate
  point[:abandonment_rate] = abandonment_rate
  point
end
-
-
private
-
-
# Validation: the period must end strictly after it starts.
def period_end_after_start
  return unless period_start && period_end
  return if period_end > period_start

  errors.add(:period_end, 'must be after period start')
end
-
-
# Validation: completed plus abandoned executions can never exceed the total.
def executions_consistency
  return unless total_executions && completed_executions && abandoned_executions

  finished = completed_executions + abandoned_executions
  return if finished <= total_executions

  errors.add(:base, 'Completed and abandoned executions cannot exceed total executions')
end
-
-
# Derives a trend direction and absolute percentage change from a list of
# values ordered newest-first (as produced by the DESC-ordered plucks above).
# Movements within +/-5% are classified as :stable.
# NOTE(review): defined with `def self.` under a `private` marker; Ruby's
# `private` does not apply to singleton methods, so this remains public.
def self.calculate_trend(values)
  stable = { direction: :stable, change_percentage: 0 }
  return stable if values.length < 2

  oldest = values.last.to_f
  newest = values.first.to_f
  return stable if oldest == 0

  change = ((newest - oldest) / oldest * 100).round(1)

  direction =
    if change > 5
      :up
    elsif change < -5
      :down
    else
      :stable
    end

  { direction: direction, change_percentage: change.abs }
end
-
end
-
2
# A single user's run through a journey, modeled as an AASM state machine
# persisted in the +status+ column.
# Lifecycle: initialized -> running <-> paused -> completed/failed/cancelled,
# with :reset returning any terminal state to :initialized.
class JourneyExecution < ApplicationRecord
  include AASM

  belongs_to :journey
  belongs_to :user
  # Step the execution is currently on; nil until the first advance.
  belongs_to :current_step, class_name: 'JourneyStep', optional: true
  has_many :step_executions, dependent: :destroy

  # A user may have at most one execution per journey.
  validates :user_id, uniqueness: { scope: :journey_id, message: "can only have one execution per journey" }

  scope :active, -> { where(status: %w[initialized running paused]) }
  scope :completed, -> { where(status: 'completed') }
  scope :failed, -> { where(status: 'failed') }

  aasm column: :status do
    state :initialized, initial: true
    state :running
    state :paused
    state :completed
    state :failed
    state :cancelled

    # Starting (or restarting from pause) requires a published journey;
    # started_at is stamped only on the very first start.
    event :start do
      transitions from: [:initialized, :paused], to: :running do
        guard { journey.published? }
        after { record_start_time }
      end
    end

    event :pause do
      transitions from: :running, to: :paused do
        after { record_pause_time }
      end
    end

    event :resume do
      transitions from: :paused, to: :running do
        after { clear_pause_time }
      end
    end

    event :complete do
      transitions from: [:running, :paused], to: :completed do
        after { record_completion_time }
      end
    end

    # Failure snapshots when/where it happened into the execution context.
    event :fail do
      transitions from: [:initialized, :running, :paused], to: :failed do
        after { record_failure }
      end
    end

    event :cancel do
      transitions from: [:initialized, :running, :paused], to: :cancelled
    end

    # Reset wipes all progress (see reset_execution_state below).
    event :reset do
      transitions from: [:completed, :failed, :cancelled], to: :initialized do
        after { reset_execution_state }
      end
    end
  end

  # Resolves the next step to visit: the journey's first entry point when
  # nothing has started; otherwise the first outgoing transition whose
  # conditions evaluate true against the execution context, falling back to
  # the step at the next sequential position (nil if none).
  def next_step
    return journey.journey_steps.entry_points.first if current_step.nil?

    # Find next step based on transitions and conditions
    available_transitions = current_step.transitions_from.includes(:to_step)

    available_transitions.each do |transition|
      if transition.evaluate(execution_context)
        return transition.to_step
      end
    end

    # If no conditional transitions match, return sequential next step
    journey.journey_steps.where(position: current_step.position + 1).first
  end

  # Moves to the next step and records a StepExecution; completes the run
  # when the new step is an exit point or when no further step exists.
  def advance_to_next_step!
    next_step_obj = next_step

    if next_step_obj
      update!(current_step: next_step_obj)
      create_step_execution(next_step_obj)

      # Check if this is an exit point
      complete! if next_step_obj.is_exit_point?
    else
      # No more steps available
      complete!
    end
  end

  # True only while running, not already on an exit point, and a next step
  # can be resolved.
  def can_advance?
    return false unless running?
    return false if current_step&.is_exit_point?

    next_step.present?
  end

  # Percentage of steps traversed (by position); 100 once completed,
  # 0 when the journey has no steps.
  def progress_percentage
    return 0 if journey.total_steps == 0
    return 100 if completed?

    current_position = current_step&.position || 0
    ((current_position.to_f / journey.total_steps) * 100).round(1)
  end

  # Seconds between start and completion/pause/now; 0 if never started.
  def elapsed_time
    return 0 unless started_at

    end_time = completed_at || paused_at || Time.current
    end_time - started_at
  end

  # Merges one key/value into the persisted execution context.
  # Keys are normalized to strings; persists immediately via update!.
  def add_context(key, value)
    context = execution_context.dup
    context[key.to_s] = value
    update!(execution_context: context)
  end

  # Reads a context value (keys were normalized to strings on write).
  def get_context(key)
    execution_context[key.to_s]
  end

  private

  # Stamp started_at only once; resumes keep the original start time.
  def record_start_time
    update!(started_at: Time.current) if started_at.nil?
  end

  def record_pause_time
    update!(paused_at: Time.current)
  end

  def clear_pause_time
    update!(paused_at: nil)
  end

  def record_completion_time
    update!(completed_at: Time.current, paused_at: nil)
  end

  # Capture when and on which step the failure happened.
  def record_failure
    add_context('failure_time', Time.current)
    add_context('failure_step', current_step&.name)
  end

  # Clear all progress fields and delete per-step history for a fresh run.
  def reset_execution_state
    update!(
      current_step: nil,
      started_at: nil,
      completed_at: nil,
      paused_at: nil,
      execution_context: {},
      completion_notes: nil
    )
    step_executions.destroy_all
  end

  # Snapshot the current context into a new StepExecution for +step+.
  def create_step_execution(step)
    step_executions.create!(
      journey_step: step,
      started_at: Time.current,
      context: execution_context.dup
    )
  end
end
-
# Cached analytics / AI insight snapshots for a journey. Each row stores one
# +insights_type+ payload in the +data+ JSON column, with optional expiry.
class JourneyInsight < ApplicationRecord
  belongs_to :journey

  INSIGHTS_TYPES = %w[
    ai_suggestions
    performance_metrics
    user_behavior
    completion_rates
    stage_effectiveness
    content_performance
    channel_performance
    optimization_opportunities
    predictive_analytics
    benchmark_comparison
    brand_compliance
    brand_voice_analysis
    brand_guideline_adherence
  ].freeze

  validates :insights_type, inclusion: { in: INSIGHTS_TYPES }
  validates :calculated_at, presence: true
  validate :validate_data_structure

  scope :active, -> { where('expires_at IS NULL OR expires_at > ?', Time.current) }
  scope :expired, -> { where('expires_at IS NOT NULL AND expires_at <= ?', Time.current) }
  scope :by_type, ->(type) { where(insights_type: type) }
  scope :recent, ->(days = 7) { where('calculated_at >= ?', days.days.ago) }

  # Scopes for different insights types
  scope :ai_suggestions, -> { by_type('ai_suggestions') }
  scope :performance_metrics, -> { by_type('performance_metrics') }
  scope :user_behavior, -> { by_type('user_behavior') }
  scope :brand_compliance, -> { by_type('brand_compliance') }
  scope :brand_voice_analysis, -> { by_type('brand_voice_analysis') }
  scope :brand_guideline_adherence, -> { by_type('brand_guideline_adherence') }

  # Callbacks
  before_save :set_default_expires_at, if: -> { expires_at.blank? && insights_type == 'ai_suggestions' }

  # Most recent non-expired insight for a journey, optionally by type.
  def self.latest_for_journey(journey_id, insights_type = nil)
    query = where(journey_id: journey_id).active.order(calculated_at: :desc)
    query = query.by_type(insights_type) if insights_type
    query.first
  end

  # Maps each insights_type to a relation of rows calculated at that type's
  # latest timestamp.
  # NOTE(review): the inner relation filters only on calculated_at, not on
  # the type — rows of other types sharing a timestamp leak in; confirm
  # whether a by_type filter was intended.
  def self.insights_summary_for_journey(journey_id)
    where(journey_id: journey_id)
      .active
      .group(:insights_type)
      .maximum(:calculated_at)
      .transform_values { |timestamp| where(journey_id: journey_id, calculated_at: timestamp) }
  end

  def self.cleanup_expired
    expired.delete_all
  end

  # Deletes (does not recompute) insights older than +threshold+; callers
  # are expected to regenerate them afterwards.
  def self.refresh_stale_insights(threshold = 24.hours)
    where('calculated_at < ?', threshold.ago).delete_all
  end

  # Aggregated brand-compliance stats over the last +days+ days.
  # Returns {} when no compliance insights exist in the window.
  def self.brand_compliance_summary(journey_id, days = 30)
    compliance_insights = where(journey_id: journey_id)
                            .brand_compliance
                            .where('calculated_at >= ?', days.days.ago)
                            .order(calculated_at: :desc)

    return {} if compliance_insights.empty?

    scores = compliance_insights.map { |insight| insight.data['score'] }.compact
    violations_counts = compliance_insights.map { |insight| insight.data['violations_count'] || 0 }
    compliant_count = compliance_insights.count { |insight| insight.data['compliant'] }

    {
      # FIX: guard against rows whose data lacks a 'score' key — previously
      # 0.0 / 0 produced NaN when every score was missing.
      average_score: scores.empty? ? 0 : scores.sum.to_f / scores.length,
      latest_score: scores.first,
      score_trend: calculate_score_trend(scores),
      total_violations: violations_counts.sum,
      average_violations_per_check: violations_counts.sum.to_f / violations_counts.length,
      checks_performed: compliance_insights.count,
      compliant_checks: compliant_count,
      compliance_rate: compliant_count.to_f / compliance_insights.count * 100
    }
  end

  # Per-step compliance aggregates over the last +days+ days, keyed by step_id.
  def self.brand_compliance_by_step(journey_id, days = 30)
    compliance_insights = where(journey_id: journey_id)
                            .brand_compliance
                            .where('calculated_at >= ?', days.days.ago)

    step_compliance = {}

    compliance_insights.each do |insight|
      step_id = insight.data['step_id']
      next unless step_id

      step_compliance[step_id] ||= {
        scores: [],
        violations: [],
        checks: 0
      }

      step_compliance[step_id][:scores] << insight.data['score']
      step_compliance[step_id][:violations] << (insight.data['violations_count'] || 0)
      step_compliance[step_id][:checks] += 1
    end

    # Calculate averages for each step
    step_compliance.transform_values do |data|
      {
        # NOTE(review): scores may contain nils when a row has no 'score'
        # key, which would make sum raise — confirm upstream guarantees it.
        average_score: data[:scores].sum.to_f / data[:scores].length,
        total_violations: data[:violations].sum,
        checks_performed: data[:checks],
        latest_score: data[:scores].first
      }
    end
  end

  # Tallies violations by category and severity over the last +days+ days.
  def self.brand_violations_breakdown(journey_id, days = 30)
    compliance_insights = where(journey_id: journey_id)
                            .brand_compliance
                            .where('calculated_at >= ?', days.days.ago)

    violation_categories = Hash.new(0)
    violation_severity = Hash.new(0)

    compliance_insights.each do |insight|
      violations = insight.data['violations'] || []
      violations.each do |violation|
        violation_categories[violation['type']] += 1
        violation_severity[violation['severity']] += 1
      end
    end

    {
      by_category: violation_categories,
      by_severity: violation_severity,
      total_violations: violation_categories.values.sum
    }
  end

  # Compares the newest three scores with the oldest three (input ordered
  # newest-first) and labels the movement. Windows overlap for fewer than
  # six scores, which dampens the signal toward 'stable'.
  def self.calculate_score_trend(scores)
    return 'stable' if scores.length < 3

    recent_scores = scores.first(3)
    older_scores = scores.last(3)

    recent_avg = recent_scores.sum.to_f / recent_scores.length
    older_avg = older_scores.sum.to_f / older_scores.length

    diff = recent_avg - older_avg

    if diff > 0.05
      'improving'
    elsif diff < -0.05
      'declining'
    else
      'stable'
    end
  end

  # Instance methods
  def expired?
    expires_at && expires_at <= Time.current
  end

  def active?
    !expired?
  end

  def age_in_hours
    ((Time.current - calculated_at) / 1.hour).round(2)
  end

  def age_in_days
    ((Time.current - calculated_at) / 1.day).round(2)
  end

  # Remaining lifetime as {days:, hours:, minutes:}; 0 when already expired,
  # nil when the insight never expires.
  def time_to_expiry
    return nil unless expires_at

    seconds_remaining = expires_at - Time.current
    return 0 if seconds_remaining <= 0

    {
      days: (seconds_remaining / 1.day).floor,
      hours: ((seconds_remaining % 1.day) / 1.hour).floor,
      minutes: ((seconds_remaining % 1.hour) / 1.minute).floor
    }
  end

  # Insights data accessors

  # Suggestions array for ai_suggestions rows.
  # FIX: previously returned {} for non-matching types but [] as the payload
  # default — callers now always get an Array (consistent with
  # optimization_opportunities below).
  def suggestions_data
    return [] unless insights_type == 'ai_suggestions'

    data['suggestions'] || []
  end

  def performance_data
    return {} unless insights_type == 'performance_metrics'

    data['metrics'] || {}
  end

  def user_behavior_data
    return {} unless insights_type == 'user_behavior'

    data['behavior_patterns'] || {}
  end

  def optimization_opportunities
    return [] unless insights_type == 'optimization_opportunities'

    data['opportunities'] || []
  end

  # Brand compliance data accessors
  def brand_compliance_data
    return {} unless insights_type == 'brand_compliance'

    {
      score: data['score'],
      compliant: data['compliant'],
      violations: data['violations'] || [],
      suggestions: data['suggestions'] || [],
      violations_count: data['violations_count'] || 0,
      step_id: data['step_id'],
      brand_id: data['brand_id']
    }
  end

  def brand_voice_data
    return {} unless insights_type == 'brand_voice_analysis'

    data['voice_analysis'] || {}
  end

  def brand_guideline_data
    return {} unless insights_type == 'brand_guideline_adherence'

    data['guideline_adherence'] || {}
  end

  # Dispatches to the type-specific payload validator (wired in via the
  # `validate :validate_data_structure` macro above).
  def validate_data_structure
    case insights_type
    when 'ai_suggestions'
      validate_suggestions_data
    when 'performance_metrics'
      validate_performance_data
    when 'user_behavior'
      validate_behavior_data
    when 'brand_compliance'
      validate_brand_compliance_data
    when 'brand_voice_analysis'
      validate_brand_voice_data
    when 'brand_guideline_adherence'
      validate_brand_guideline_data
    end
  end

  # Export and summary methods
  def to_summary
    {
      id: id,
      journey_id: journey_id,
      insights_type: insights_type,
      calculated_at: calculated_at,
      expires_at: expires_at,
      age_hours: age_in_hours,
      active: active?,
      data_keys: data.keys,
      metadata_keys: metadata.keys,
      provider: metadata['provider']
    }
  end

  def to_export
    {
      insights_type: insights_type,
      data: data,
      metadata: metadata,
      calculated_at: calculated_at,
      journey_context: {
        journey_id: journey_id,
        journey_name: journey.name,
        journey_status: journey.status
      }
    }
  end

  private

  # Each suggestion must be a Hash carrying the full set of required keys.
  def validate_suggestions_data
    suggestions = data['suggestions']
    return if suggestions.blank?

    unless suggestions.is_a?(Array)
      errors.add(:data, 'suggestions must be an array')
      return
    end

    suggestions.each_with_index do |suggestion, index|
      unless suggestion.is_a?(Hash)
        errors.add(:data, "suggestion at index #{index} must be a hash")
        next
      end

      required_keys = %w[name description stage content_type channel]
      missing_keys = required_keys - suggestion.keys

      if missing_keys.any?
        errors.add(:data, "suggestion at index #{index} missing keys: #{missing_keys.join(', ')}")
      end
    end
  end

  def validate_performance_data
    metrics = data['metrics']
    return if metrics.blank?

    errors.add(:data, 'performance metrics must be a hash') unless metrics.is_a?(Hash)
  end

  def validate_behavior_data
    behavior = data['behavior_patterns']
    return if behavior.blank?

    errors.add(:data, 'behavior patterns must be a hash') unless behavior.is_a?(Hash)
  end

  # Compliance payload needs score/compliant/violations_count, a numeric
  # score in [0, 1], a boolean compliant flag, and well-formed violations.
  def validate_brand_compliance_data
    return if data.blank?

    required_keys = %w[score compliant violations_count]
    missing_keys = required_keys - data.keys

    if missing_keys.any?
      errors.add(:data, "brand compliance data missing keys: #{missing_keys.join(', ')}")
    end

    # Validate score is numeric and in valid range
    if data['score'].present? && (!data['score'].is_a?(Numeric) || data['score'] < 0 || data['score'] > 1)
      errors.add(:data, 'brand compliance score must be a number between 0 and 1')
    end

    # Validate compliant is boolean
    unless [true, false].include?(data['compliant'])
      errors.add(:data, 'brand compliance compliant field must be boolean')
    end

    # Validate violations array structure
    if data['violations'].present?
      unless data['violations'].is_a?(Array)
        errors.add(:data, 'violations must be an array')
        return
      end

      data['violations'].each_with_index do |violation, index|
        unless violation.is_a?(Hash)
          errors.add(:data, "violation at index #{index} must be a hash")
          next
        end

        violation_required_keys = %w[type severity message]
        violation_missing_keys = violation_required_keys - violation.keys

        if violation_missing_keys.any?
          errors.add(:data, "violation at index #{index} missing keys: #{violation_missing_keys.join(', ')}")
        end
      end
    end
  end

  def validate_brand_voice_data
    voice_data = data['voice_analysis']
    return if voice_data.blank?

    errors.add(:data, 'brand voice analysis must be a hash') unless voice_data.is_a?(Hash)
  end

  def validate_brand_guideline_data
    guideline_data = data['guideline_adherence']
    return if guideline_data.blank?

    errors.add(:data, 'brand guideline adherence must be a hash') unless guideline_data.is_a?(Hash)
  end

  # Default 24h TTL for AI suggestions that arrive without an expiry.
  def set_default_expires_at
    self.expires_at = 24.hours.from_now
  end
end
-
# Time-bucketed analytics metric for a journey. Exactly one row exists per
# (journey, metric_name, aggregation_period, calculated_at) combination.
class JourneyMetric < ApplicationRecord
  belongs_to :journey
  belongs_to :campaign
  belongs_to :user

  validates :metric_name, presence: true
  validates :metric_value, presence: true, numericality: true
  validates :metric_type, presence: true, inclusion: {
    in: %w[count rate percentage duration score index]
  }
  validates :aggregation_period, presence: true, inclusion: {
    in: %w[hourly daily weekly monthly quarterly yearly]
  }
  validates :calculated_at, presence: true

  # Ensure uniqueness of metrics per journey/period combination
  validates :metric_name, uniqueness: {
    scope: [:journey_id, :aggregation_period, :calculated_at]
  }

  scope :by_metric, ->(metric_name) { where(metric_name: metric_name) }
  scope :by_type, ->(metric_type) { where(metric_type: metric_type) }
  scope :by_period, ->(period) { where(aggregation_period: period) }
  scope :recent, -> { order(calculated_at: :desc) }
  scope :for_date_range, ->(start_date, end_date) { where(calculated_at: start_date..end_date) }

  # Common metric names
  CORE_METRICS = %w[
    total_executions completed_executions abandoned_executions
    conversion_rate completion_rate engagement_score
    average_completion_time bounce_rate click_through_rate
    cost_per_acquisition return_on_investment
  ].freeze

  ENGAGEMENT_METRICS = %w[
    page_views time_on_page scroll_depth interaction_rate
    social_shares comments likes video_completion_rate
  ].freeze

  CONVERSION_METRICS = %w[
    form_submissions downloads purchases signups
    trial_conversions subscription_rate upsell_rate
  ].freeze

  RETENTION_METRICS = %w[
    repeat_visits customer_lifetime_value churn_rate
    retention_rate loyalty_score net_promoter_score
  ].freeze

  ALL_METRICS = (CORE_METRICS + ENGAGEMENT_METRICS +
                 CONVERSION_METRICS + RETENTION_METRICS).freeze

  # Computes and persists all metric families for one journey/period at a
  # single shared timestamp.
  def self.calculate_and_store_metrics(journey, period = 'daily')
    calculation_time = Time.current

    calculate_core_metrics(journey, period, calculation_time)
    calculate_engagement_metrics(journey, period, calculation_time)
    calculate_conversion_metrics(journey, period, calculation_time)
    calculate_retention_metrics(journey, period, calculation_time)
  end

  # Recent history for one metric, returned oldest-first with a trend label.
  # FIX: returns {} (not []) when no data exists — callers index the result
  # with symbols (e.g. [:trend] in get_journey_dashboard_metrics), and
  # Array#[] with a Symbol raises TypeError.
  def self.get_metric_trend(journey_id, metric_name, periods = 7, aggregation_period = 'daily')
    metrics = where(journey_id: journey_id, metric_name: metric_name, aggregation_period: aggregation_period)
              .order(calculated_at: :desc)
              .limit(periods)

    return {} if metrics.empty?

    values = metrics.reverse.pluck(:metric_value, :calculated_at)

    {
      metric_name: metric_name,
      values: values.map { |value, date| { value: value, date: date } },
      trend: calculate_trend_direction(values.map(&:first)),
      latest_value: values.last&.first,
      change_percentage: calculate_percentage_change(values.map(&:first))
    }
  end

  # Latest value of every metric for a journey/period, with a 7-point trend.
  def self.get_journey_dashboard_metrics(journey_id, period = 'daily')
    latest_metrics = where(journey_id: journey_id, aggregation_period: period)
                     .group(:metric_name)
                     .maximum(:calculated_at)

    dashboard_data = {}

    latest_metrics.each do |metric_name, latest_date|
      metric = find_by(
        journey_id: journey_id,
        metric_name: metric_name,
        aggregation_period: period,
        calculated_at: latest_date
      )

      next unless metric

      dashboard_data[metric_name] = {
        value: metric.metric_value,
        type: metric.metric_type,
        calculated_at: metric.calculated_at,
        trend: get_metric_trend(journey_id, metric_name, 7, period)[:trend],
        metadata: metric.metadata
      }
    end

    dashboard_data
  end

  # Side-by-side latest values of two journeys for the given metric names.
  # Metrics missing from either journey are omitted.
  # NOTE(review): 'better_performer' assumes higher is better, which is wrong
  # for metrics like bounce_rate — confirm intended semantics.
  def self.compare_journey_metrics(journey1_id, journey2_id, metric_names = CORE_METRICS, period = 'daily')
    comparison = {}

    metric_names.each do |metric_name|
      journey1_metric = where(journey_id: journey1_id, metric_name: metric_name, aggregation_period: period)
                        .order(calculated_at: :desc)
                        .first

      journey2_metric = where(journey_id: journey2_id, metric_name: metric_name, aggregation_period: period)
                        .order(calculated_at: :desc)
                        .first

      next unless journey1_metric && journey2_metric

      comparison[metric_name] = {
        journey1_value: journey1_metric.metric_value,
        journey2_value: journey2_metric.metric_value,
        difference: journey2_metric.metric_value - journey1_metric.metric_value,
        percentage_change: calculate_percentage_change([journey1_metric.metric_value, journey2_metric.metric_value]),
        better_performer: journey1_metric.metric_value > journey2_metric.metric_value ? 'journey1' : 'journey2'
      }
    end

    comparison
  end

  # Rolls journey-level core metrics up to campaign level: rates/percentages/
  # scores are averaged across journeys, everything else is summed.
  def self.get_campaign_rollup_metrics(campaign_id, period = 'daily')
    campaign_journeys = Journey.where(campaign_id: campaign_id)
    return {} if campaign_journeys.empty?

    rollup_metrics = {}

    CORE_METRICS.each do |metric_name|
      journey_metrics = where(
        journey_id: campaign_journeys.pluck(:id),
        metric_name: metric_name,
        aggregation_period: period
      ).group(:journey_id)
       .maximum(:calculated_at)

      total_value = 0
      metric_count = 0

      journey_metrics.each do |journey_id, latest_date|
        metric = find_by(
          journey_id: journey_id,
          metric_name: metric_name,
          aggregation_period: period,
          calculated_at: latest_date
        )
        next unless metric

        # FIX: the previous if/else executed the identical accumulation in
        # both branches; collapsed to a single statement.
        total_value += metric.metric_value
        metric_count += 1
      end

      next if metric_count == 0

      rollup_metrics[metric_name] = if %w[rate percentage score].include?(get_metric_type(metric_name))
        total_value / metric_count # Average for rates/percentages
      else
        total_value # Sum for counts
      end
    end

    rollup_metrics
  end

  # Human-readable rendering of metric_value according to metric_type.
  def formatted_value
    case metric_type
    when 'percentage', 'rate'
      "#{metric_value.round(1)}%"
    when 'duration'
      format_duration(metric_value)
    when 'count'
      metric_value.to_i.to_s
    else
      metric_value.round(2).to_s
    end
  end

  # English description of a known metric name.
  def self.metric_definition(metric_name)
    definitions = {
      'total_executions' => 'Total number of journey executions started',
      'completed_executions' => 'Number of journeys completed successfully',
      'abandoned_executions' => 'Number of journeys abandoned before completion',
      'conversion_rate' => 'Percentage of executions that resulted in conversion',
      'completion_rate' => 'Percentage of executions that were completed',
      'engagement_score' => 'Overall engagement score based on interactions',
      'average_completion_time' => 'Average time to complete the journey',
      'bounce_rate' => 'Percentage of visitors who left after viewing only one step',
      'click_through_rate' => 'Percentage of users who clicked through to next step'
    }

    definitions[metric_name] || 'Custom metric'
  end

  private

  # NOTE(review): Ruby's `private` only affects instance methods; the
  # `def self.` methods below stay public (matches original behavior).

  # Executions/completions/abandonments, completion rate and average
  # completion time for the current period bucket.
  def self.calculate_core_metrics(journey, period, calculation_time)
    period_start = get_period_start(calculation_time, period)

    executions = journey.journey_executions.where(created_at: period_start..calculation_time)

    # Total executions
    create_metric(journey, 'total_executions', executions.count, 'count', period, calculation_time)

    # Completed executions
    completed = executions.where(status: 'completed').count
    create_metric(journey, 'completed_executions', completed, 'count', period, calculation_time)

    # Abandoned executions
    abandoned = executions.where(status: 'abandoned').count
    create_metric(journey, 'abandoned_executions', abandoned, 'count', period, calculation_time)

    # Completion rate
    completion_rate = executions.count > 0 ? (completed.to_f / executions.count * 100) : 0
    create_metric(journey, 'completion_rate', completion_rate, 'percentage', period, calculation_time)

    # Average completion time
    completed_executions = executions.where(status: 'completed').where.not(completed_at: nil)
    avg_time = if completed_executions.any?
      completed_executions.average('completed_at - started_at') || 0
    else
      0
    end
    create_metric(journey, 'average_completion_time', avg_time, 'duration', period, calculation_time)
  end

  def self.calculate_engagement_metrics(journey, period, calculation_time)
    # Placeholder for engagement metrics calculation
    # This would integrate with actual user interaction data

    # For now, create sample metrics
    create_metric(journey, 'engagement_score', rand(70..95), 'score', period, calculation_time)
    create_metric(journey, 'interaction_rate', rand(40..80), 'percentage', period, calculation_time)
  end

  def self.calculate_conversion_metrics(journey, period, calculation_time)
    # Placeholder for conversion metrics calculation
    # This would integrate with actual conversion tracking

    period_start = get_period_start(calculation_time, period)
    executions = journey.journey_executions.where(created_at: period_start..calculation_time)

    # Simple conversion rate based on completed journeys
    conversion_rate = if executions.count > 0
      (executions.where(status: 'completed').count.to_f / executions.count * 100)
    else
      0
    end

    create_metric(journey, 'conversion_rate', conversion_rate, 'percentage', period, calculation_time)
  end

  def self.calculate_retention_metrics(journey, period, calculation_time)
    # Placeholder for retention metrics calculation
    # This would integrate with actual user behavior tracking

    create_metric(journey, 'retention_rate', rand(60..85), 'percentage', period, calculation_time)
  end

  # Upserts one metric row; on a DB-level uniqueness collision the existing
  # row's value is updated instead.
  # NOTE(review): the model-level uniqueness validation raises
  # ActiveRecord::RecordInvalid, which this rescue does NOT catch — the
  # fallback only fires when the DB constraint trips. Confirm intent.
  def self.create_metric(journey, metric_name, value, type, period, calculation_time)
    create!(
      journey: journey,
      campaign: journey.campaign,
      user: journey.user,
      metric_name: metric_name,
      metric_value: value,
      metric_type: type,
      aggregation_period: period,
      calculated_at: calculation_time
    )
  rescue ActiveRecord::RecordNotUnique
    # Metric already exists for this period, update it
    existing = find_by(
      journey: journey,
      metric_name: metric_name,
      aggregation_period: period,
      calculated_at: calculation_time
    )
    existing&.update!(metric_value: value)
  end

  # Start of the bucket containing +calculation_time+ for the given period.
  def self.get_period_start(calculation_time, period)
    case period
    when 'hourly' then calculation_time.beginning_of_hour
    when 'daily' then calculation_time.beginning_of_day
    when 'weekly' then calculation_time.beginning_of_week
    when 'monthly' then calculation_time.beginning_of_month
    when 'quarterly' then calculation_time.beginning_of_quarter
    when 'yearly' then calculation_time.beginning_of_year
    else calculation_time.beginning_of_day
    end
  end

  # Compares the average of the first half against the second half of the
  # series (oldest-first) and labels movements beyond +/-5%.
  def self.calculate_trend_direction(values)
    return :stable if values.length < 2

    first_half = values[0...(values.length / 2)]
    second_half = values[(values.length / 2)..-1]

    first_avg = first_half.sum.to_f / first_half.length
    second_avg = second_half.sum.to_f / second_half.length

    # FIX: removed the dead `rescue 0` modifier — float division never
    # raises ZeroDivisionError. A 0 -> 0 baseline yields NaN, whose
    # comparisons are all false and thus classify as :stable (unchanged),
    # and a 0 -> positive baseline yields +Infinity, i.e. :up (unchanged).
    change_percentage = (second_avg - first_avg) / first_avg * 100

    if change_percentage > 5
      :up
    elsif change_percentage < -5
      :down
    else
      :stable
    end
  end

  # Percent change from the first to the last value; 0 for short or
  # zero-based series.
  def self.calculate_percentage_change(values)
    return 0 if values.length < 2 || values.first == 0

    ((values.last - values.first) / values.first * 100).round(1)
  end

  # Maps a core metric name to its aggregation type for rollups.
  def self.get_metric_type(metric_name)
    case metric_name
    when *%w[total_executions completed_executions abandoned_executions]
      'count'
    when *%w[conversion_rate completion_rate bounce_rate]
      'percentage'
    when 'average_completion_time'
      'duration'
    when 'engagement_score'
      'score'
    else
      'rate'
    end
  end

  # Renders a duration in seconds as "Xh Ym", "Ym" or "Xs".
  def format_duration(seconds)
    return '0s' if seconds == 0

    if seconds >= 1.hour
      hours = (seconds / 1.hour).to_i
      minutes = ((seconds % 1.hour) / 1.minute).to_i
      "#{hours}h #{minutes}m"
    elsif seconds >= 1.minute
      minutes = (seconds / 1.minute).to_i
      "#{minutes}m"
    else
      "#{seconds.to_i}s"
    end
  end
end
-
2
class JourneyStep < ApplicationRecord
-
2
belongs_to :journey
-
2
has_many :step_executions, dependent: :destroy
-
2
has_many :transitions_from, class_name: 'StepTransition', foreign_key: 'from_step_id', dependent: :destroy
-
2
has_many :transitions_to, class_name: 'StepTransition', foreign_key: 'to_step_id', dependent: :destroy
-
2
has_many :next_steps, through: :transitions_from, source: :to_step
-
2
has_many :previous_steps, through: :transitions_to, source: :from_step
-
-
STEP_TYPES = %w[
-
2
blog_post
-
email_sequence
-
social_media
-
lead_magnet
-
webinar
-
case_study
-
sales_call
-
demo
-
trial_offer
-
onboarding
-
newsletter
-
feedback_survey
-
].freeze
-
-
CONTENT_TYPES = %w[
-
2
email
-
blog_post
-
social_post
-
landing_page
-
video
-
webinar
-
ebook
-
case_study
-
whitepaper
-
infographic
-
podcast
-
advertisement
-
survey
-
demo
-
consultation
-
].freeze
-
-
CHANNELS = %w[
-
2
email
-
website
-
facebook
-
instagram
-
twitter
-
linkedin
-
youtube
-
google_ads
-
display_ads
-
sms
-
push_notification
-
direct_mail
-
event
-
sales_call
-
].freeze
-
-
2
validates :name, presence: true
-
2
validates :stage, inclusion: { in: Journey::STAGES }
-
2
validates :position, presence: true, numericality: { greater_than_or_equal_to: 0 }
-
2
validates :content_type, inclusion: { in: CONTENT_TYPES }, allow_blank: true
-
2
validates :channel, inclusion: { in: CHANNELS }, allow_blank: true
-
2
validates :duration_days, numericality: { greater_than: 0 }, allow_blank: true
-
-
# Brand compliance validations
-
2
validate :validate_brand_compliance, if: :should_validate_brand_compliance?
-
-
2
scope :by_position, -> { order(:position) }
-
2
scope :by_stage, ->(stage) { where(stage: stage) }
-
2
scope :entry_points, -> { where(is_entry_point: true) }
-
2
scope :exit_points, -> { where(is_exit_point: true) }
-
-
2
before_create :set_position
-
2
after_destroy :reorder_positions
-
-
# Brand compliance callbacks
-
2
before_save :check_real_time_compliance, if: :should_check_compliance?
-
2
after_update :broadcast_compliance_status, if: :saved_change_to_description?
-
-
2
# Repositions this step within the journey, shifting every step between the
# old and new slots so the position sequence stays contiguous. Runs inside a
# transaction so a failed update! rolls the shifts back.
def move_to_position(new_position)
  return if new_position == position

  transaction do
    moving_earlier = new_position < position
    affected_range = moving_earlier ? (new_position...position) : ((position + 1)..new_position)
    shift_sql = moving_earlier ? 'position = position + 1' : 'position = position - 1'

    journey.journey_steps.where(position: affected_range).update_all(shift_sql)
    update!(position: new_position)
  end
end
-
-
2
# Creates an outgoing transition to +to_step+. Transitions carrying
# conditions are typed 'conditional'; bare ones are 'sequential'.
def add_transition_to(to_step, conditions = {})
  kind = conditions.present? ? 'conditional' : 'sequential'

  transitions_from.create!(
    to_step: to_step,
    conditions: conditions,
    transition_type: kind
  )
end
-
-
2
# Destroys every outgoing transition pointing at +to_step+ (runs destroy
# callbacks, unlike delete_all).
def remove_transition_to(to_step)
  transitions_from.where(to_step: to_step).destroy_all
end
-
-
2
# True when +step+ is directly reachable via an outgoing transition.
def can_transition_to?(step)
  next_steps.include?(step)
end
-
-
2
# Evaluates this step's stored entry conditions against a runtime context
# hash. Every condition must hold; unknown condition keys are treated as
# satisfied, and a step without conditions always passes.
def evaluate_conditions(context = {})
  return true if conditions.blank?

  conditions.all? do |key, value|
    case key
    when 'min_engagement_score'   then context['engagement_score'].to_i >= value.to_i
    when 'completed_action'       then context['completed_actions']&.include?(value)
    when 'time_since_last_action' then context['time_since_last_action'].to_i >= value.to_i
    else true
    end
  end
end
-
-
2
# Serializes this step, including its outgoing transitions, for journey
# export/import.
def to_json_export
  exported = {
    name: name,
    description: description,
    stage: stage,
    position: position,
    content_type: content_type,
    channel: channel,
    duration_days: duration_days,
    config: config,
    conditions: conditions,
    metadata: metadata,
    is_entry_point: is_entry_point,
    is_exit_point: is_exit_point
  }

  exported[:transitions] = transitions_from.map do |transition|
    { to: transition.to_step.name, conditions: transition.conditions }
  end

  exported
end
-
-
# Brand compliance methods
-
2
# Runs a full brand-compliance check on this step's content, returning the
# service's result hash. Brand-less journeys get the neutral no-brand result.
def check_brand_compliance(options = {})
  return no_brand_result unless has_brand?

  service = Journey::BrandComplianceService.new(
    journey: journey,
    step: self,
    content: compilable_content,
    context: build_compliance_context
  )
  service.check_compliance(options)
end
-
-
2
# True when the step meets the minimum compliance threshold (service default
# when +threshold+ is nil); trivially true without a brand.
def brand_compliant?(threshold = nil)
  return true unless has_brand?

  service = Journey::BrandComplianceService.new(
    journey: journey,
    step: self,
    content: compilable_content,
    context: build_compliance_context
  )
  service.meets_minimum_compliance?(threshold)
end
-
-
2
# Cheap compliance score for this step's content; 1.0 when there is no brand.
def quick_compliance_score
  return 1.0 unless has_brand?

  Journey::BrandComplianceService
    .new(journey: journey, step: self, content: compilable_content, context: build_compliance_context)
    .quick_score
end
-
-
2
# Violations from a full compliance check; empty when there is no brand.
def compliance_violations
  return [] unless has_brand?

  check_brand_compliance[:violations] || []
end
-
-
2
# Recommendation list from the compliance service; empty when no brand.
def compliance_suggestions
  return [] unless has_brand?

  service = Journey::BrandComplianceService.new(
    journey: journey, step: self,
    content: compilable_content, context: build_compliance_context
  )
  service.get_recommendations[:recommendations] || []
end
-
-
2
# Attempts to automatically rewrite this step's content to resolve brand
# compliance violations.
#
# Returns a hash:
#   fixed: true  -> :content is the rewritten text, :fixes lists applied fixes
#   fixed: false -> :content is unchanged; :available_fixes lists fixes the
#                   service reported as available (keys absent when no brand)
#
# NOTE(review): the success path writes via update_column, which skips
# validations and callbacks — presumably to avoid re-triggering the
# compliance validation hooks; confirm that is intentional.
def auto_fix_compliance_issues
  return { fixed: false, content: compilable_content } unless has_brand?

  compliance_service = Journey::BrandComplianceService.new(
    journey: journey,
    step: self,
    content: compilable_content,
    context: build_compliance_context
  )

  fix_results = compliance_service.auto_fix_violations

  if fix_results[:fixed_content].present?
    # Update description with fixed content if auto-fix was successful
    update_column(:description, fix_results[:fixed_content])
    { fixed: true, content: fix_results[:fixed_content], fixes: fix_results[:fixes_applied] }
  else
    { fixed: false, content: compilable_content, available_fixes: fix_results[:fixes_available] }
  end
end
-
-
2
# Whether +message_text+ (or, by default, this step's own content) is allowed
# by the brand's messaging rules. Brandless journeys always pass.
def messaging_compliant?(message_text = nil)
  return true unless has_brand?

  subject = message_text || compilable_content
  Journey::BrandComplianceService
    .new(journey: journey, step: self, content: subject, context: build_compliance_context)
    .messaging_allowed?(subject)
end
-
-
2
# Brand rules that apply to this step's content; empty when no brand.
def applicable_brand_guidelines
  return [] unless has_brand?

  Journey::BrandComplianceService
    .new(journey: journey, step: self, content: compilable_content, context: build_compliance_context)
    .applicable_brand_rules
end
-
-
2
# Summary of the owning journey's brand (identity, industry, which messaging
# assets exist, and this step's compliance strictness level).
# Returns an empty hash when the journey has no brand.
def brand_context
  return {} unless has_brand?

  {
    brand_id: journey.brand.id,
    brand_name: journey.brand.name,
    industry: journey.brand.industry,
    has_messaging_framework: journey.brand.messaging_framework.present?,
    has_guidelines: journey.brand.brand_guidelines.active.any?,
    compliance_level: determine_compliance_level
  }
end
-
-
2
# Most recent brand_compliance insight recorded for this step, or nil.
def latest_compliance_check
  checks = journey.journey_insights
                  .where(insights_type: 'brand_compliance')
                  .where("data->>'step_id' = ?", id.to_s)
  checks.order(calculated_at: :desc).first
end
-
-
2
# brand_compliance insights for this step within the last +days+ days,
# newest first.
def compliance_history(days = 30)
  checks = journey.journey_insights
                  .where(insights_type: 'brand_compliance')
                  .where("data->>'step_id' = ?", id.to_s)
  checks.where('calculated_at >= ?', days.days.ago).order(calculated_at: :desc)
end
-
-
2
private
-
-
2
# Assigns the next free position (append to end) when none was given.
# A missing maximum (first step in the journey) yields position 0.
def set_position
  return unless position.nil? || position == 0

  highest = journey.journey_steps.where.not(id: id).maximum(:position) || -1
  self.position = highest + 1
end
-
-
2
# Closes the positional gap at this step's slot: every later step in the
# journey is shifted down by one (single UPDATE, no callbacks/validations).
# NOTE(review): presumably wired to run when this step is removed; confirm —
# running it while the step still exists would produce duplicate positions.
def reorder_positions
  journey.journey_steps.where('position > ?', position).update_all('position = position - 1')
end
-
-
# Brand compliance private methods
-
2
# Gate for the brand-compliance validation: only when a brand exists, the
# name/description actually changed, skipping isn't requested, and there is
# content to check.
def should_validate_brand_compliance?
  return false unless has_brand?
  return false unless description_changed? || name_changed?
  return false if skip_brand_validation?

  compilable_content.present?
end
-
-
2
# Gate for the real-time compliance snapshot: brand present, name or
# description about to change, and skipping not requested.
def should_check_compliance?
  return false unless has_brand?
  return false if skip_compliance_check?

  will_save_change_to_description? || will_save_change_to_name?
end
-
-
2
# Validation hook: runs a pre-generation compliance check and adds an error
# on :description when the content is disallowed. Critical violations get an
# aggregated message; otherwise only the first violation is surfaced.
def validate_brand_compliance
  return if compilable_content.blank?

  service = Journey::BrandComplianceService.new(
    journey: journey, step: self,
    content: compilable_content, context: build_compliance_context
  )

  result = service.pre_generation_check(compilable_content)
  return if result[:allowed]

  violations = result[:violations] || []
  return if violations.empty?

  critical = violations.select { |v| v[:severity] == 'critical' }
  if critical.any?
    messages = critical.map { |v| v[:message] }.join(', ')
    errors.add(:description, "Content violates critical brand guidelines: #{messages}")
  else
    errors.add(:description, "Content may violate brand guidelines: #{violations.first[:message]}")
  end
end
-
-
2
# Save-time hook: snapshots a quick compliance score into
# metadata['last_compliance_check'] (score, ISO8601 timestamp, and a
# compliant flag using a 0.7 cutoff) and logs a warning below 0.5.
# Mutates self.metadata only; persistence happens with the enclosing save.
def check_real_time_compliance
  return unless compilable_content.present?

  # Store compliance check in metadata for later reference
  compliance_score = quick_compliance_score
  self.metadata ||= {}
  self.metadata['last_compliance_check'] = {
    score: compliance_score,
    checked_at: Time.current.iso8601,
    compliant: compliance_score >= 0.7
  }

  # Log warning for low compliance scores
  if compliance_score < 0.5
    Rails.logger.warn "Journey step #{id} has low brand compliance score: #{compliance_score}"
  end
end
-
-
2
# Pushes a compliance_updated event over ActionCable on the per-step channel
# "journey_step_compliance_<id>". Best-effort: the bare rescue swallows any
# StandardError (e.g. cable/redis failures) and only logs, so callers are
# never interrupted by broadcast problems.
def broadcast_compliance_status
  return unless has_brand?

  # Broadcast real-time compliance status update
  ActionCable.server.broadcast(
    "journey_step_compliance_#{id}",
    {
      event: 'compliance_updated',
      step_id: id,
      journey_id: journey.id,
      brand_id: journey.brand.id,
      compliance_score: quick_compliance_score,
      timestamp: Time.current
    }
  )
rescue => e
  Rails.logger.error "Failed to broadcast compliance status: #{e.message}"
end
-
-
2
# True when this step's journey exists and is linked to a brand.
def has_brand?
  journey.present? && journey.brand_id.present?
end
-
-
2
# Name and description joined into one string for compliance checking;
# nil parts are dropped.
def compilable_content
  [name, description].compact.join(". ").strip
end
-
-
2
# Context hash handed to Journey::BrandComplianceService: this step's
# placement/typing plus a nested summary of the owning journey.
def build_compliance_context
  {
    step_id: id,
    step_name: name,
    content_type: content_type,
    channel: channel,
    stage: stage,
    position: position,
    is_entry_point: is_entry_point,
    is_exit_point: is_exit_point,
    journey_context: {
      campaign_type: journey.campaign_type,
      target_audience: journey.target_audience,
      goals: journey.goals
    }
  }
end
-
-
2
# Maps step placement to a compliance strictness symbol:
# entry points / awareness -> :strict, conversion & retention -> :standard,
# everything else -> :flexible.
def determine_compliance_level
  if is_entry_point? || stage == 'awareness'
    :strict
  else
    %w[conversion retention].include?(stage) ? :standard : :flexible
  end
end
-
-
2
# Opt-out flag for the brand validation; the test-only escape hatch is
# honored only under Rails.env.test?.
def skip_brand_validation?
  return true if metadata&.dig('skip_brand_validation') == true

  Rails.env.test? && metadata&.dig('test_skip_validation') == true
end
-
-
2
# Opt-out flag for the real-time compliance snapshot; the test-only escape
# hatch is honored only under Rails.env.test?.
def skip_compliance_check?
  return true if metadata&.dig('skip_compliance_check') == true

  Rails.env.test? && metadata&.dig('test_skip_compliance') == true
end
-
-
2
# Neutral "passing" result returned by compliance checks when the journey
# has no brand to check against (perfect score, no violations).
def no_brand_result
  {
    compliant: true,
    score: 1.0,
    summary: "No brand associated with journey",
    violations: [],
    suggestions: [],
    step_context: {
      step_id: id,
      no_brand: true
    }
  }
end
-
end
-
2
# A reusable journey blueprint: steps/connections live in template_data, and
# templates carry lightweight major.minor versioning encoded as a float
# (e.g. 1.07 == major 1, minor 7).
class JourneyTemplate < ApplicationRecord
  has_many :journeys

  # Versioning associations: versions point back at the original template.
  belongs_to :original_template, class_name: 'JourneyTemplate', optional: true
  has_many :versions, class_name: 'JourneyTemplate', foreign_key: 'original_template_id', dependent: :destroy

  CATEGORIES = %w[
    b2b
    b2c
    ecommerce
    saas
    nonprofit
    education
    healthcare
    financial_services
    real_estate
    hospitality
  ].freeze

  DIFFICULTY_LEVELS = %w[beginner intermediate advanced].freeze

  validates :name, presence: true
  validates :category, presence: true, inclusion: { in: CATEGORIES }
  validates :campaign_type, inclusion: { in: Journey::CAMPAIGN_TYPES }, allow_blank: true
  validates :difficulty_level, inclusion: { in: DIFFICULTY_LEVELS }, allow_blank: true
  validates :estimated_duration_days, numericality: { greater_than: 0 }, allow_blank: true
  validates :version, presence: true, numericality: { greater_than: 0 }
  validates :version, uniqueness: { scope: :original_template_id }, if: :original_template_id?

  scope :active, -> { where(is_active: true) }
  scope :by_category, ->(category) { where(category: category) }
  scope :by_campaign_type, ->(type) { where(campaign_type: type) }
  scope :popular, -> { order(usage_count: :desc) }
  scope :recent, -> { order(created_at: :desc) }
  scope :published_versions, -> { where(is_published_version: true) }
  # Self-join that keeps only the highest-numbered version per template family.
  scope :latest_versions, -> { joins("LEFT JOIN journey_templates jt2 ON jt2.original_template_id = journey_templates.original_template_id AND jt2.version > journey_templates.version").where("jt2.id IS NULL") }

  # Builds and saves a journey for +user+ from this template, copying the
  # template's steps and transitions on success and bumping usage_count.
  # Returns the journey either way; check journey.persisted?/errors on failure.
  def create_journey_for_user(user, journey_params = {})
    journey = user.journeys.build(
      name: journey_params[:name] || "#{name} - #{Date.current}",
      description: journey_params[:description] || description,
      campaign_type: campaign_type,
      target_audience: journey_params[:target_audience],
      goals: journey_params[:goals],
      brand_id: journey_params[:brand_id],
      metadata: {
        template_id: id,
        template_name: name,
        created_from_template: true
      }
    )

    if journey.save
      create_steps_for_journey(journey)
      increment!(:usage_count)
    end

    journey
  end

  # Raw step definitions stored in template_data.
  def preview_steps
    template_data['steps'] || []
  end

  def steps_data
    template_data['steps'] || []
  end

  def steps_data=(value)
    self.template_data = (template_data || {}).merge('steps' => value)
  end

  def connections_data
    template_data['connections'] || []
  end

  def connections_data=(value)
    self.template_data = (template_data || {}).merge('connections' => value)
  end

  def step_count
    preview_steps.size
  end

  # Distinct stages appearing across the template's steps.
  def stages_covered
    preview_steps.map { |step| step['stage'] }.uniq
  end

  def channels_used
    preview_steps.map { |step| step['channel'] }.uniq.compact
  end

  def content_types_included
    preview_steps.map { |step| step['content_type'] }.uniq.compact
  end

  # True for the root template of a version family.
  def is_original?
    original_template_id.nil?
  end

  def root_template
    original_template || self
  end

  # All versions in this template's family, ordered by version number.
  # The original is included only when called on the original itself
  # (preserves existing behavior).
  def all_versions
    if is_original?
      [self] + versions.order(:version)
    else
      original_template.versions.order(:version)
    end
  end

  def latest_version
    if is_original?
      versions.order(:version).last || self
    else
      original_template.latest_version
    end
  end

  # Duplicates this template as an unsaved new version of the family root.
  def create_new_version(version_params = {})
    new_version_number = calculate_next_version_number

    new_version = self.dup
    new_version.assign_attributes(
      original_template: root_template,
      version: new_version_number,
      parent_version: version,
      version_notes: version_params[:version_notes],
      is_published_version: version_params[:is_published_version] || false,
      usage_count: 0,
      is_active: true
    )

    # Update name to include version if it's not the original
    unless new_version.name.match(/v\d+\.\d+/)
      new_version.name = "#{name} v#{new_version_number}"
    end

    new_version
  end

  # Atomically makes this version the single published one in its family.
  def publish_version!
    transaction do
      # Unpublish other versions of the same template
      root_template.versions.update_all(is_published_version: false)
      if root_template != self
        root_template.update!(is_published_version: false)
      end

      # Publish this version
      update!(is_published_version: true)
    end
  end

  def version_history
    all_versions.map do |version|
      {
        version: version.version,
        created_at: version.created_at,
        version_notes: version.version_notes,
        is_published: version.is_published_version,
        usage_count: version.usage_count
      }
    end
  end

  private

  # Computes the next version number within the current highest major.
  # Minor versions roll over into a new major at 100.
  def calculate_next_version_number
    existing_versions = root_template.versions.pluck(:version)
    existing_versions << root_template.version

    major_version = existing_versions.map(&:to_i).max || 1
    # Round — do NOT truncate — the fractional part: (1.29 % 1) * 100 is
    # 28.999999999999996 in IEEE-754, so .to_i silently dropped a minor step.
    minor_versions = existing_versions.select { |v| v.to_i == major_version }
                                      .map { |v| ((v % 1) * 100).round }
    next_minor = (minor_versions.max || 0) + 1

    # If minor version reaches 100, increment major version
    if next_minor >= 100
      major_version += 1
      next_minor = 0
    end

    major_version + (next_minor / 100.0)
  end

  # Copies template_data's steps (first pass) and transitions (second pass,
  # resolved via the template-local step ids) onto +journey+.
  def create_steps_for_journey(journey)
    return unless template_data['steps'].present?

    step_mapping = {}

    # First pass: create all steps
    template_data['steps'].each_with_index do |step_data, index|
      step = journey.journey_steps.create!(
        name: step_data['name'],
        description: step_data['description'],
        stage: step_data['stage'],
        position: index,
        content_type: step_data['content_type'],
        channel: step_data['channel'],
        duration_days: step_data['duration_days'] || 1,
        config: step_data['config'] || {},
        conditions: step_data['conditions'] || {},
        metadata: step_data['metadata'] || {},
        is_entry_point: step_data['is_entry_point'] || (index == 0),
        is_exit_point: step_data['is_exit_point'] || false
      )

      step_mapping[step_data['id']] = step if step_data['id']
    end

    # Second pass: create transitions
    template_data['transitions']&.each do |transition_data|
      from_step = step_mapping[transition_data['from_step_id']]
      to_step = step_mapping[transition_data['to_step_id']]

      if from_step && to_step
        StepTransition.create!(
          from_step: from_step,
          to_step: to_step,
          transition_type: transition_data['transition_type'] || 'sequential',
          conditions: transition_data['conditions'] || {},
          priority: transition_data['priority'] || 0,
          metadata: transition_data['metadata'] || {}
        )
      end
    end
  end
end
-
2
# Per-brand messaging rules: categorized key messages, value propositions,
# approved phrasing, a banned-word list, and tone attributes.
class MessagingFramework < ApplicationRecord
  belongs_to :brand

  # Validations — at most one active framework per brand.
  validates :brand, presence: true, uniqueness: { scope: :active, if: :active? }

  # Scopes
  scope :active, -> { where(active: true) }

  # Callbacks
  before_save :ensure_arrays_for_lists

  # Appends +message+ under +category+ (deduplicated) and saves.
  def add_key_message(category, message)
    self.key_messages ||= {}
    bucket = (self.key_messages[category] ||= [])
    bucket << message unless bucket.include?(message)
    save
  end

  # Appends +proposition+ to the "main" value propositions (deduplicated).
  def add_value_proposition(proposition)
    self.value_propositions ||= {}
    bucket = (self.value_propositions["main"] ||= [])
    bucket << proposition unless bucket.include?(proposition)
    save
  end

  # Adds +phrase+ to the approved list (deduplicated) and saves.
  def add_approved_phrase(phrase)
    self.approved_phrases ||= []
    self.approved_phrases << phrase unless approved_phrases.include?(phrase)
    save
  end

  # Adds the lowercased +word+ to the banned list (deduplicated) and saves.
  def add_banned_word(word)
    normalized = word.downcase
    self.banned_words ||= []
    self.banned_words << normalized unless banned_words.include?(normalized)
    save
  end

  # Removes the lowercased +word+ from the banned list and saves.
  def remove_banned_word(word)
    self.banned_words ||= []
    banned_words.delete(word.downcase)
    save
  end

  # Case-insensitive membership test against the banned list.
  def is_word_banned?(word)
    return false if banned_words.blank?

    banned_words.include?(word.downcase)
  end

  # True when +text+ contains at least one banned word.
  def contains_banned_words?(text)
    get_banned_words_in_text(text).any?
  end

  # Banned words present in +text+ (tokenized on non-word characters).
  def get_banned_words_in_text(text)
    return [] if banned_words.blank?

    text.downcase.split(/\W+/) & banned_words
  end

  # Tone predicates backed by the tone_attributes hash.
  def tone_formal?
    tone_attributes["formality"] == "formal"
  end

  def tone_casual?
    tone_attributes["formality"] == "casual"
  end

  def tone_professional?
    tone_attributes["style"] == "professional"
  end

  def tone_friendly?
    tone_attributes["style"] == "friendly"
  end

  private

  # Guarantees list columns are arrays (not nil) before every save.
  def ensure_arrays_for_lists
    self.approved_phrases = [] if approved_phrases.nil?
    self.banned_words = [] if banned_words.nil?
  end
end
-
2
# A target-audience profile owned by a user, with structured demographic,
# behavioral, preference, and psychographic data hashes.
class Persona < ApplicationRecord
  belongs_to :user
  has_many :campaigns, dependent: :destroy
  has_many :journeys, through: :campaigns

  validates :name, presence: true, uniqueness: { scope: :user_id }
  validates :description, presence: true

  # Demographic fields
  DEMOGRAPHIC_FIELDS = %w[
    age_range gender location income_level education_level
    employment_status family_status occupation
  ].freeze

  # Behavior fields
  BEHAVIOR_FIELDS = %w[
    online_activity purchase_behavior social_media_usage
    content_preferences communication_preferences device_usage
  ].freeze

  # Preference fields
  PREFERENCE_FIELDS = %w[
    brand_loyalty price_sensitivity channel_preferences
    messaging_tone content_types shopping_habits
  ].freeze

  # Psychographic fields
  PSYCHOGRAPHIC_FIELDS = %w[
    values personality_traits lifestyle interests
    attitudes motivations goals pain_points
  ].freeze

  # Personas attached to at least one active/published campaign.
  scope :active, -> { joins(:campaigns).where(campaigns: { status: ['active', 'published'] }).distinct }

  def display_name
    name
  end

  def age_range
    demographics['age_range']
  end

  # First entry of the preferred-channels list, if any.
  def primary_channel
    preferences['channel_preferences']&.first
  end

  def total_campaigns
    campaigns.count
  end

  def active_campaigns
    campaigns.where(status: ['active', 'published']).count
  end

  # One-line human summary of age/location/income, with fallbacks when the
  # hash is missing or sparsely populated.
  def demographics_summary
    return 'No demographics data' if demographics.blank?

    parts = { 'Age' => demographics['age_range'],
              'Location' => demographics['location'],
              'Income' => demographics['income_level'] }
            .filter_map { |label, value| "#{label}: #{value}" if value.present? }

    parts.empty? ? 'Limited demographics data' : parts.join(', ')
  end

  # One-line human summary of online/purchase/social behavior, with fallbacks.
  def behavior_summary
    return 'No behavior data' if behaviors.blank?

    parts = { 'Online' => behaviors['online_activity'],
              'Purchase' => behaviors['purchase_behavior'],
              'Social' => behaviors['social_media_usage'] }
            .filter_map { |label, value| "#{label}: #{value}" if value.present? }

    parts.empty? ? 'Limited behavior data' : parts.join(', ')
  end

  # Nil-safe accessors for the raw data hashes.
  def demographic_data
    demographics || {}
  end

  def psychographic_data
    psychographics || {}
  end

  def behavioral_data
    behaviors || {}
  end

  # Condensed hash used when feeding this persona into campaign generation.
  def to_campaign_context
    {
      name: name,
      description: description,
      demographics: demographics_summary,
      behaviors: behavior_summary,
      preferences: preferences['messaging_tone'] || 'neutral',
      channels: preferences['channel_preferences'] || []
    }
  end
end
-
2
# Database-backed authentication session with an absolute lifetime and a
# sliding inactivity window.
class Session < ApplicationRecord
  belongs_to :user

  # Constants
  SESSION_TIMEOUT = 24.hours   # absolute lifetime from creation/extension
  INACTIVE_TIMEOUT = 2.hours   # allowed gap between activity touches

  # Scopes
  scope :active, -> { where("expires_at > ?", Time.current) }
  scope :expired, -> { where("expires_at <= ?", Time.current) }

  # Callbacks
  before_create :set_expiration

  # Instance methods

  # Whether the absolute lifetime has elapsed.
  def expired?
    Time.current >= expires_at
  end

  # Whether the last recorded activity is older than the inactivity window.
  def inactive?
    last_active_at && last_active_at < INACTIVE_TIMEOUT.ago
  end

  # Records activity now.
  def touch_activity!
    update!(last_active_at: Time.current)
  end

  # Pushes expiry out by a full session lifetime.
  def extend_session!
    update!(expires_at: SESSION_TIMEOUT.from_now)
  end

  private

  # Seeds expiry and activity timestamps unless already provided.
  def set_expiration
    self.expires_at ||= SESSION_TIMEOUT.from_now
    self.last_active_at ||= Time.current
  end
end
-
2
# One run of a journey step within a journey execution: tracks the status
# lifecycle (pending -> in_progress -> completed/failed/skipped) and an
# accumulating result_data hash.
class StepExecution < ApplicationRecord
  belongs_to :journey_execution
  belongs_to :journey_step

  STATUSES = %w[pending in_progress completed failed skipped].freeze

  validates :status, inclusion: { in: STATUSES }

  scope :completed, -> { where(status: 'completed') }
  scope :failed, -> { where(status: 'failed') }
  scope :pending, -> { where(status: 'pending') }
  scope :in_progress, -> { where(status: 'in_progress') }

  # Marks the execution as started now.
  def start!
    update!(status: 'in_progress', started_at: Time.current)
  end

  # Completes the execution, merging +result+ into result_data.
  def complete!(result = {})
    update!(
      status: 'completed',
      completed_at: Time.current,
      result_data: result_data.merge(result)
    )
  end

  # Fails the execution, recording an optional reason and the failure time.
  def fail!(reason = nil)
    close_with!('failed', 'failure_reason', 'failed_at', reason)
  end

  # Skips the execution, recording an optional reason and the skip time.
  def skip!(reason = nil)
    close_with!('skipped', 'skip_reason', 'skipped_at', reason)
  end

  # Seconds between start and completion; 0 when either timestamp is missing.
  def duration
    return 0 unless started_at && completed_at

    completed_at - started_at
  end

  # Stores a single key/value pair into result_data (string keys).
  def add_result(key, value)
    data = result_data.dup
    data[key.to_s] = value
    update!(result_data: data)
  end

  def get_result(key)
    result_data[key.to_s]
  end

  def success?
    status == 'completed'
  end

  def failed?
    status == 'failed'
  end

  def pending?
    status == 'pending'
  end

  def in_progress?
    status == 'in_progress'
  end

  private

  # Shared terminal-state writer for fail!/skip!: stamps the reason (when
  # given) and a timestamp into result_data and closes the execution. The
  # duplicated bodies of fail!/skip! are collapsed here.
  def close_with!(new_status, reason_key, timestamp_key, reason)
    data = result_data.dup
    data[reason_key] = reason if reason
    data[timestamp_key] = Time.current

    update!(
      status: new_status,
      completed_at: Time.current,
      result_data: data
    )
  end
end
-
2
# Directed edge between two steps of the same journey, optionally guarded by
# a conditions hash evaluated against a runtime context.
class StepTransition < ApplicationRecord
  belongs_to :from_step, class_name: 'JourneyStep'
  belongs_to :to_step, class_name: 'JourneyStep'

  TRANSITION_TYPES = %w[sequential conditional split merge].freeze

  validates :from_step, presence: true
  validates :to_step, presence: true
  validates :transition_type, inclusion: { in: TRANSITION_TYPES }
  validates :priority, numericality: { greater_than_or_equal_to: 0 }
  validate :prevent_self_reference
  validate :steps_in_same_journey

  scope :by_priority, -> { order(:priority) }
  scope :conditional, -> { where(transition_type: 'conditional') }
  scope :sequential, -> { where(transition_type: 'sequential') }

  # True when every condition is satisfied by +context+ (or none exist).
  def evaluate(context = {})
    return true if conditions.blank?

    conditions.all? { |type, value| evaluate_condition(type, value, context) }
  end

  def journey
    from_step.journey
  end

  private

  # A step may not transition to itself.
  def prevent_self_reference
    errors.add(:to_step, "can't be the same as from_step") if from_step_id == to_step_id
  end

  # Both endpoints must belong to the same journey.
  def steps_in_same_journey
    return unless from_step && to_step
    return if from_step.journey_id == to_step.journey_id

    errors.add(:base, "Steps must belong to the same journey")
  end

  # Dispatches a single condition check; unknown condition types pass.
  def evaluate_condition(condition_type, condition_value, context)
    case condition_type
    when 'engagement_threshold'
      context['engagement_score'].to_f >= condition_value.to_f
    when 'action_completed'
      Array(context['completed_actions']).include?(condition_value)
    when 'time_elapsed'
      context['time_elapsed'].to_i >= condition_value.to_i
    when 'form_submitted'
      context['submitted_forms']&.include?(condition_value)
    when 'link_clicked'
      context['clicked_links']&.include?(condition_value)
    when 'purchase_made'
      context['purchases']&.any? { |p| p['product_id'] == condition_value }
    when 'score_range'
      # Inclusive min/max bounds taken from the condition hash.
      score = context['score'].to_f
      score.between?(condition_value['min'].to_f, condition_value['max'].to_f)
    else
      true
    end
  end
end
-
# User feedback on AI-generated journey-step suggestions, with analytics
# helpers for rating/selection breakdowns.
class SuggestionFeedback < ApplicationRecord
  belongs_to :journey
  belongs_to :journey_step
  belongs_to :user

  FEEDBACK_TYPES = %w[
    suggestion_quality
    relevance
    usefulness
    timing
    channel_fit
    content_appropriateness
    implementation_ease
    expected_results
  ].freeze

  validates :feedback_type, inclusion: { in: FEEDBACK_TYPES }
  validates :rating, numericality: { in: 1..5 }, allow_nil: true
  validates :selected, inclusion: { in: [true, false] }
  # Declared here with the other validations instead of dangling after a
  # trailing `private` marker at the bottom of the class.
  validate :validate_rating_for_feedback_type

  scope :positive, -> { where('rating >= ?', 4) }
  scope :negative, -> { where('rating <= ?', 2) }
  scope :selected, -> { where(selected: true) }
  scope :by_feedback_type, ->(type) { where(feedback_type: type) }
  scope :recent, -> { where('created_at >= ?', 30.days.ago) }

  # Scopes for analytics
  scope :for_content_type, ->(content_type) {
    joins(:journey_step).where(journey_steps: { content_type: content_type })
  }

  scope :for_stage, ->(stage) {
    joins(:journey_step).where(journey_steps: { stage: stage })
  }

  scope :for_channel, ->(channel) {
    joins(:journey_step).where(journey_steps: { channel: channel })
  }

  # Class methods for analytics

  # Average rating grouped by feedback_type.
  def self.average_rating_by_type
    group(:feedback_type).average(:rating)
  end

  # Counts keyed by { content_type:, selected: } pairs.
  def self.selection_rate_by_content_type
    joins(:journey_step)
      .group('journey_steps.content_type')
      .group(:selected)
      .count
      .transform_keys { |key| key.is_a?(Array) ? { content_type: key[0], selected: key[1] } : key }
  end

  # Counts keyed by { stage:, selected: } pairs.
  def self.selection_rate_by_stage
    joins(:journey_step)
      .group('journey_steps.stage')
      .group(:selected)
      .count
      .transform_keys { |key| key.is_a?(Array) ? { stage: key[0], selected: key[1] } : key }
  end

  # Most frequently selected suggestions, as { suggested_step_id => count }.
  def self.top_performing_suggestions(limit = 10)
    where(selected: true)
      .group(:suggested_step_id)
      .order('COUNT(*) DESC')
      .limit(limit)
      .count
  end

  # Daily average rating per feedback type over the trailing window.
  def self.feedback_trends(days = 30)
    where('created_at >= ?', days.days.ago)
      .group_by_day(:created_at)
      .group(:feedback_type)
      .average(:rating)
  end

  # Instance methods
  def positive?
    rating && rating >= 4
  end

  def negative?
    rating && rating <= 2
  end

  def neutral?
    rating && rating == 3
  end

  # Metadata accessors for the suggestion snapshot captured at feedback time.
  def suggested_step_data
    metadata['suggested_step_data']
  end

  def ai_provider
    metadata['provider']
  end

  def feedback_timestamp
    metadata['timestamp']
  end

  # Validation helpers

  # Quality-oriented feedback types must carry a rating.
  def validate_rating_for_feedback_type
    case feedback_type
    when 'suggestion_quality', 'relevance', 'usefulness'
      errors.add(:rating, "is required for #{feedback_type}") if rating.blank?
    end
  end
end
-
2
# Application user: authentication (has_secure_password + sessions), profile
# data with avatar attachment, role-based access, and admin-driven
# lock/suspend states.
class User < ApplicationRecord
  has_secure_password
  has_many :sessions, dependent: :destroy
  has_one_attached :avatar
  has_many :activities, dependent: :destroy
  has_many :journeys, dependent: :destroy
  has_many :journey_executions, dependent: :destroy
  has_many :personas, dependent: :destroy
  has_many :campaigns, dependent: :destroy
  has_many :journey_analytics, class_name: 'JourneyAnalytics', dependent: :destroy
  has_many :conversion_funnels, dependent: :destroy
  has_many :journey_metrics, dependent: :destroy
  has_many :ab_tests, dependent: :destroy
  has_many :brands, dependent: :destroy
  has_many :suggestion_feedbacks, dependent: :destroy

  # Self-referential association for suspension tracking
  belongs_to :suspended_by, class_name: "User", optional: true

  # Canonicalize emails before validation/persistence.
  normalizes :email_address, with: ->(e) { e.strip.downcase }

  validates :email_address, presence: true, uniqueness: true, format: { with: URI::MailTo::EMAIL_REGEXP }
  # Password length enforced on create and whenever a new password is set.
  validates :password, length: { minimum: 6 }, if: -> { new_record? || password.present? }

  # Profile validations
  validates :full_name, length: { maximum: 100 }
  validates :bio, length: { maximum: 500 }
  validates :phone_number, format: { with: /\A[\d\s\-\+\(\)]+\z/, allow_blank: true }
  validates :company, length: { maximum: 100 }
  validates :job_title, length: { maximum: 100 }
  validates :timezone, inclusion: { in: ActiveSupport::TimeZone.all.map(&:name) }, allow_blank: true

  # Avatar validations
  validate :acceptable_avatar

  # Role-based access control
  enum :role, { marketer: 0, team_member: 1, admin: 2 }

  # Helper methods for role checking
  # NOTE(review): enum :role already generates marketer?/team_member?/admin?;
  # these redefinitions (declared after the enum, so they win) are equivalent
  # string comparisons — presumably kept for explicitness. Confirm before
  # removing.
  def marketer?
    role == "marketer"
  end

  def team_member?
    role == "team_member"
  end

  def admin?
    role == "admin"
  end

  # Password reset token generation
  # Short-lived (15 min) signed id scoped to the password-reset purpose.
  def password_reset_token
    signed_id(purpose: :password_reset, expires_in: 15.minutes)
  end

  # Find user by password reset token
  # Raises when the token is invalid, expired, or for another purpose.
  def self.find_by_password_reset_token!(token)
    find_signed!(token, purpose: :password_reset)
  end

  # Profile helpers
  # Full name when present, otherwise the email local part.
  def display_name
    full_name.presence || email_address.split("@").first
  end

  # Account locking
  def locked?
    locked_at.present?
  end

  def unlock!
    update!(locked_at: nil, lock_reason: nil)
  end

  def lock!(reason = "Account locked for security reasons")
    update!(locked_at: Time.current, lock_reason: reason)
  end

  # Account suspension (different from locking - this is admin-initiated)
  def suspended?
    suspended_at.present?
  end

  # Suspends this user, recording who did it and why.
  def suspend!(reason:, by:)
    update!(
      suspended_at: Time.current,
      suspension_reason: reason,
      suspended_by: by
    )
  end

  def unsuspend!
    update!(
      suspended_at: nil,
      suspension_reason: nil,
      suspended_by: nil
    )
  end

  # Check if account is accessible (not locked or suspended)
  def account_accessible?
    !locked? && !suspended?
  end

  # Returns a resized avatar variant (:thumb/:medium/:large), the raw avatar
  # for unknown sizes, or nil when no avatar is attached.
  def avatar_variant(size)
    return unless avatar.attached?

    case size
    when :thumb
      avatar.variant(resize_to_limit: [50, 50])
    when :medium
      avatar.variant(resize_to_limit: [200, 200])
    when :large
      avatar.variant(resize_to_limit: [400, 400])
    else
      avatar
    end
  end

  private

  # Validation: avatar must be <= 5MB and a common web image type.
  def acceptable_avatar
    return unless avatar.attached?

    unless avatar.blob.byte_size <= 5.megabyte
      errors.add(:avatar, "is too big (should be at most 5MB)")
    end

    acceptable_types = ["image/jpeg", "image/jpg", "image/png", "image/gif", "image/webp"]
    unless acceptable_types.include?(avatar.blob.content_type)
      errors.add(:avatar, "must be a JPEG, PNG, GIF, or WebP")
    end
  end
end
-
2
class UserActivity < ApplicationRecord
-
2
belongs_to :user
-
-
# Constants for activity types
-
ACTIVITY_TYPES = {
-
2
login: 'login',
-
logout: 'logout',
-
create: 'create',
-
update: 'update',
-
delete: 'delete',
-
view: 'view',
-
download: 'download',
-
upload: 'upload',
-
failed_login: 'failed_login',
-
password_reset: 'password_reset',
-
profile_update: 'profile_update',
-
suspicious_activity: 'suspicious_activity'
-
}.freeze
-
-
# Suspicious activity patterns
-
SUSPICIOUS_PATTERNS = {
-
2
rapid_requests: { threshold: 100, window: 1.minute },
-
failed_logins: { threshold: 5, window: 15.minutes },
-
unusual_hours: { start_hour: 2, end_hour: 5 }, # 2 AM - 5 AM
-
mass_downloads: { threshold: 50, window: 10.minutes }
-
}.freeze
-
-
# Validations
-
2
validates :action, presence: true
-
2
validates :controller_name, presence: true
-
2
validates :action_name, presence: true
-
2
validates :ip_address, presence: true
-
2
validates :performed_at, presence: true
-
-
# Scopes
-
2
scope :recent, -> { order(performed_at: :desc) }
-
2
scope :by_user, ->(user) { where(user: user) }
-
2
scope :by_action, ->(action) { where(action: action) }
-
2
scope :by_date_range, ->(start_date, end_date) { where(performed_at: start_date..end_date) }
-
2
scope :suspicious, -> { where(action: ACTIVITY_TYPES[:suspicious_activity]) }
-
2
scope :failed_logins, -> { where(action: ACTIVITY_TYPES[:failed_login]) }
-
-
# Callbacks
-
2
before_validation :set_performed_at
-
2
after_create :check_for_suspicious_activity
-
-
# Class methods
-
2
def self.log_activity(user, action, options = {})
-
create!(
-
user: user,
-
action: action,
-
controller_name: options[:controller_name] || 'unknown',
-
action_name: options[:action_name] || 'unknown',
-
resource_type: options[:resource_type],
-
resource_id: options[:resource_id],
-
ip_address: options[:ip_address] || '0.0.0.0',
-
user_agent: options[:user_agent],
-
request_params: options[:request_params],
-
metadata: options[:metadata] || {},
-
performed_at: Time.current
-
)
-
end
-
-
2
def self.check_user_suspicious_activity(user)
-
suspicious_activities = []
-
-
# Check for rapid requests
-
recent_count = by_user(user).where(performed_at: SUSPICIOUS_PATTERNS[:rapid_requests][:window].ago..Time.current).count
-
if recent_count > SUSPICIOUS_PATTERNS[:rapid_requests][:threshold]
-
suspicious_activities << "Rapid requests detected: #{recent_count} requests in #{SUSPICIOUS_PATTERNS[:rapid_requests][:window].inspect}"
-
end
-
-
# Check for multiple failed logins
-
failed_login_count = by_user(user).failed_logins.where(performed_at: SUSPICIOUS_PATTERNS[:failed_logins][:window].ago..Time.current).count
-
if failed_login_count >= SUSPICIOUS_PATTERNS[:failed_logins][:threshold]
-
suspicious_activities << "Multiple failed login attempts: #{failed_login_count} attempts"
-
end
-
-
# Check for unusual hour activity
-
unusual_hour = SUSPICIOUS_PATTERNS[:unusual_hours]
-
current_hour = Time.current.hour
-
if current_hour >= unusual_hour[:start_hour] && current_hour <= unusual_hour[:end_hour]
-
suspicious_activities << "Activity during unusual hours: #{current_hour}:00"
-
end
-
-
suspicious_activities
-
end
-
-
# Instance methods
-
2
# True when this row records a flagged-suspicious event.
def suspicious?
  ACTIVITY_TYPES[:suspicious_activity] == action
end
-
-
2
# Best-effort lookup of the record this activity touched.
# Returns nil when type/id are missing, when the stored class name no
# longer resolves (NameError), or when the row has since been deleted.
def resource
  return nil if resource_type.blank? || resource_id.blank?

  klass = resource_type.constantize
  klass.find_by(id: resource_id)
rescue NameError
  nil
end
-
-
2
# Human-readable summary of this activity. Known action types map to a
# fixed phrase; anything else is rendered as "<Action> <ResourceType>",
# or nil when no resource type is recorded.
def description
  fixed_descriptions = {
    ACTIVITY_TYPES[:login] => "User logged in",
    ACTIVITY_TYPES[:logout] => "User logged out",
    ACTIVITY_TYPES[:failed_login] => "Failed login attempt",
    ACTIVITY_TYPES[:password_reset] => "Password reset requested",
    ACTIVITY_TYPES[:profile_update] => "Profile updated"
  }

  return fixed_descriptions[action] if fixed_descriptions.key?(action)

  "#{action.humanize} #{resource_type}" if resource_type.present?
end
-
-
2
private
-
-
2
# before_validation hook: stamp performed_at with the current time
# unless the caller already supplied one.
def set_performed_at
  self.performed_at = Time.current if performed_at.nil?
end
-
-
2
# after_create hook: re-evaluates the user's recent behavior and, when
# patterns are found, writes a dedicated suspicious_activity row plus a
# warning log line.
# NOTE(review): log_activity creates another row whose own after_create
# re-enters this hook; it appears to terminate only because the checks are
# threshold-based — confirm there is no pathological loop under load.
def check_for_suspicious_activity
  return unless user.present?

  suspicious_activities = self.class.check_user_suspicious_activity(user)

  if suspicious_activities.any?
    # Record a dedicated audit row carrying the detected reasons.
    self.class.log_activity(
      user,
      ACTIVITY_TYPES[:suspicious_activity],
      metadata: { reasons: suspicious_activities },
      ip_address: ip_address,
      user_agent: user_agent
    )

    # Trigger alert notification
    # Note: Using SuspiciousActivityAlertJob instead of direct mailer call
    # to handle both admin notification and potential user lockout
    Rails.logger.warn "Suspicious UserActivity detected for user #{user.email_address}: #{suspicious_activities.join(', ')}"
  end
end
-
end
-
# frozen_string_literal: true

# Base class for all Pundit-style policies in the app.
#
# Every primary permission defaults to "denied"; subclasses opt in by
# overriding the relevant predicate. Form actions (new?/edit?) mirror
# their write counterparts so overriding create?/update? is sufficient.
class ApplicationPolicy
  attr_reader :user, :record

  # @param user   the current (possibly nil) user
  # @param record the object being authorized
  def initialize(user, record)
    @user = user
    @record = record
  end

  # Deny-by-default for every primary action.
  %i[index? show? create? update? destroy?].each do |action|
    define_method(action) { false }
  end

  def new?
    create?
  end

  def edit?
    update?
  end

  # Base relation scope: subclasses must override #resolve.
  class Scope
    def initialize(user, scope)
      @user = user
      @scope = scope
    end

    def resolve
      raise NoMethodError, "You must define #resolve in #{self.class}"
    end

    private

    attr_reader :user, :scope
  end
end
-
# Authorization rules for Journey records. A signed-in user may manage
# only journeys they own; lifecycle actions additionally check status.
class JourneyPolicy < ApplicationPolicy
  def index?
    signed_in?
  end

  def show?
    owner?
  end

  def create?
    signed_in?
  end

  def new?
    create?
  end

  def update?
    owner?
  end

  def edit?
    update?
  end

  def destroy?
    owner?
  end

  # Duplicating requires only read access to the source journey.
  def duplicate?
    show?
  end

  # Only drafts may be published.
  def publish?
    update? && record.status == 'draft'
  end

  # Anything not already archived may be archived.
  def archive?
    update? && record.status != 'archived'
  end

  # Queries are restricted to the current user's own journeys.
  class Scope < ApplicationPolicy::Scope
    def resolve
      user.present? ? scope.where(user: user) : scope.none
    end
  end

  private

  # A user must be present before any ownership check makes sense.
  def signed_in?
    user.present?
  end

  # Signed in AND owns this journey.
  def owner?
    signed_in? && record.user == user
  end
end
-
# Authorization for individual JourneyStep records. Access is derived
# entirely from ownership of the step's parent journey.
class JourneyStepPolicy < ApplicationPolicy
  def show?
    parent_owner?
  end

  def create?
    parent_owner?
  end

  def new?
    create?
  end

  def update?
    parent_owner?
  end

  def edit?
    update?
  end

  def destroy?
    parent_owner?
  end

  # Reordering a step edits the parent journey's content.
  def move?
    update?
  end

  # Duplicating a step creates a new one, so it reuses the create rule.
  def duplicate?
    create?
  end

  # Only steps belonging to the user's own journeys are visible.
  class Scope < ApplicationPolicy::Scope
    def resolve
      return scope.none unless user.present?

      scope.joins(:journey).where(journeys: { user: user })
    end
  end

  private

  # Present user who owns this step's parent journey.
  def parent_owner?
    user.present? && record.journey.user == user
  end
end
-
# Authorization for journey templates. Active templates are readable by
# any signed-in user; mutation requires being an admin or the owner.
class JourneyTemplatePolicy < ApplicationPolicy
  def index?
    user.present?
  end

  # Inactive templates stay visible only to admins and their owner.
  def show?
    user.present? && (record.is_active? || admin_or_owner?)
  end

  def create?
    user.present?
  end

  def new?
    create?
  end

  def update?
    user.present? && admin_or_owner?
  end

  def edit?
    update?
  end

  def destroy?
    user.present? && admin_or_owner?
  end

  # Cloning and using a template require only read access.
  def clone?
    show?
  end

  def use_template?
    show?
  end

  # Both builder UIs edit the template, so they follow the update rule.
  def builder?
    update?
  end

  def builder_react?
    update?
  end

  class Scope < ApplicationPolicy::Scope
    # Admins see every template; other users see only active ones.
    def resolve
      return scope.none if user.blank?

      user.admin? ? scope.all : scope.where(is_active: true)
    end
  end

  private

  # Admins may manage any template; others only templates they own.
  # respond_to? guards records that carry no user association.
  def admin_or_owner?
    user.admin? || (record.respond_to?(:user) && record.user == user)
  end
end
-
# Authorization for the RailsAdmin interface: every action, without
# exception, is limited to admin users; nil users are safely denied.
class RailsAdminPolicy < ApplicationPolicy
  # All thirteen RailsAdmin actions share exactly one rule, so define
  # them in a loop instead of thirteen identical method bodies.
  %i[
    dashboard? index? show? new? edit? destroy?
    export? bulk_delete? show_in_app?
    history_index? history_show?
    suspend? unsuspend?
  ].each do |action|
    define_method(action) { user&.admin? }
  end
end
-
# Authorization for User records.
#
# Fix: all checks now use nil-safe admin detection so an unauthenticated
# (nil) user is *denied* instead of raising NoMethodError on `nil.admin?`,
# matching the `user&.admin?` pattern already used by RailsAdminPolicy.
# Scope#resolve is likewise guarded.
class UserPolicy < ApplicationPolicy
  # Users may view their own profile; admins may view any profile.
  def show?
    self_record? || admin?
  end

  # Users may update their own profile; admins may update any profile.
  def update?
    self_record? || admin?
  end

  # Only admins can list users.
  def index?
    admin?
  end

  # Only admins can delete users, and never themselves.
  def destroy?
    admin? && user != record
  end

  # Only admins can change roles, and never their own.
  def change_role?
    admin? && user != record
  end

  # Only admins can suspend users, and never themselves.
  def suspend?
    admin? && user != record
  end

  # Only admins can unsuspend users.
  def unsuspend?
    admin?
  end

  class Scope < ApplicationPolicy::Scope
    # Admins see everyone; a regular user sees only their own row;
    # a nil user sees nothing (previously raised NoMethodError).
    def resolve
      if user.nil?
        scope.none
      elsif user.admin?
        scope.all
      else
        scope.where(id: user.id)
      end
    end
  end

  private

  # Nil-safe admin check: falsy when no user is signed in.
  def admin?
    user&.admin?
  end

  # True when the signed-in user is looking at their own record.
  # The presence guard prevents nil == nil from granting access.
  def self_record?
    user.present? && user == record
  end
end
-
# Builds statistical and business analysis for a single A/B test.
#
# All public methods return plain hashes so the results can be rendered
# or serialized directly. Statistical routines use simplified
# approximations (two-proportion z-test, heuristic power estimates) —
# see inline notes on each helper.
#
# Fixes in this revision:
# * calculate_variant_grade / assess_power_level /
#   assess_overall_test_adequacy used gapped ranges (e.g. 7..9.99) so
#   boundary values such as 9.995 silently fell through to the worst
#   bucket; ranges are now half-open and contiguous.
# * assess_overall_test_adequacy guarded against an empty result set
#   (previously divided by zero).
# * identify_significant_segments iterated one nesting level too shallow
#   (segment data is type -> dimension -> group -> stats) and therefore
#   never found any significant segment.
# * generate_recommendations no longer crashes with `nil > 20` when the
#   best performer is the control (whose lift_vs_control is nil).
# * calculate_historical_lift returns 0 instead of raising when the
#   control conversion rate is zero (0.0/0.0 => NaN => FloatDomainError).
class AbTestAnalyticsService
  # @param ab_test [AbTest] the test under analysis (ActiveRecord model)
  def initialize(ab_test)
    @ab_test = ab_test
  end

  # Top-level entry point: every analysis section combined in one hash.
  def generate_full_analysis
    {
      test_overview: test_overview,
      variant_performance: variant_performance_analysis,
      statistical_analysis: statistical_analysis,
      confidence_intervals: confidence_intervals_analysis,
      power_analysis: power_analysis,
      recommendations: generate_recommendations,
      historical_comparison: historical_comparison,
      segments_analysis: segments_analysis
    }
  end

  # High-level descriptive summary of the test configuration and totals.
  def test_overview
    {
      test_id: @ab_test.id,
      test_name: @ab_test.name,
      status: @ab_test.status,
      hypothesis: @ab_test.hypothesis,
      test_type: @ab_test.test_type,
      duration_days: @ab_test.duration_days,
      confidence_level: @ab_test.confidence_level,
      significance_threshold: @ab_test.significance_threshold,
      total_variants: @ab_test.ab_test_variants.count,
      total_visitors: @ab_test.ab_test_variants.sum(:total_visitors),
      total_conversions: @ab_test.ab_test_variants.sum(:conversions),
      overall_conversion_rate: calculate_overall_conversion_rate,
      winner_declared: @ab_test.winner_declared?,
      winner_variant: @ab_test.winner_variant&.name
    }
  end

  # Per-variant metrics plus relative rankings (rank 1 = best rate).
  def variant_performance_analysis
    variants = @ab_test.ab_test_variants.includes(:journey)

    performance_data = variants.map do |variant|
      {
        variant_id: variant.id,
        variant_name: variant.name,
        is_control: variant.is_control?,
        journey_name: variant.journey.name,
        traffic_percentage: variant.traffic_percentage,
        total_visitors: variant.total_visitors,
        conversions: variant.conversions,
        conversion_rate: variant.conversion_rate,
        confidence_interval: variant.confidence_interval_range,
        lift_vs_control: variant.lift_vs_control,
        significance_vs_control: variant.significance_vs_control,
        sample_size_adequate: variant.sample_size_adequate?,
        statistical_power: variant.statistical_power,
        performance_grade: calculate_variant_grade(variant)
      }
    end

    # Rank variants by descending conversion rate.
    performance_data.sort_by! { |v| -v[:conversion_rate] }
    performance_data.each_with_index do |variant_data, index|
      variant_data[:performance_rank] = index + 1
    end

    {
      variants: performance_data,
      best_performer: performance_data.first,
      control_performance: performance_data.find { |v| v[:is_control] },
      performance_spread: calculate_performance_spread(performance_data)
    }
  end

  # Z-test of each treatment against the control. Empty hash when the
  # test is not running/completed or when no control variant exists.
  def statistical_analysis
    return {} unless @ab_test.running? || @ab_test.completed?

    control_variant = @ab_test.ab_test_variants.find_by(is_control: true)
    treatment_variants = @ab_test.ab_test_variants.where(is_control: false)

    return {} unless control_variant

    statistical_results = {}

    treatment_variants.each do |treatment|
      stat_test = perform_statistical_test(control_variant, treatment)

      statistical_results[treatment.name] = {
        z_score: stat_test[:z_score],
        p_value: stat_test[:p_value],
        significance_level: stat_test[:significance_level],
        is_significant: stat_test[:is_significant],
        effect_size: stat_test[:effect_size],
        power_estimate: estimate_statistical_power(control_variant, treatment),
        sample_size_recommendation: recommend_sample_size(control_variant, treatment)
      }
    end

    {
      control_variant: control_variant.name,
      treatment_results: statistical_results,
      overall_test_power: calculate_overall_test_power(statistical_results),
      significance_achieved: @ab_test.statistical_significance_reached?
    }
  end

  # Confidence-interval precision per variant, plus pairwise overlaps.
  def confidence_intervals_analysis
    variants = @ab_test.ab_test_variants

    confidence_data = variants.map do |variant|
      ci_range = variant.confidence_interval_range
      margin_of_error = (ci_range[1] - ci_range[0]) / 2

      {
        variant_name: variant.name,
        conversion_rate: variant.conversion_rate,
        confidence_interval: ci_range,
        margin_of_error: margin_of_error.round(2),
        precision_level: classify_precision(margin_of_error),
        sample_size: variant.total_visitors
      }
    end

    {
      variants_confidence: confidence_data,
      overlapping_intervals: identify_overlapping_intervals(confidence_data),
      precision_assessment: assess_overall_precision(confidence_data)
    }
  end

  # Power estimates per treatment and required sample sizes for small/
  # medium/large effects. Empty hash when there is no control variant.
  def power_analysis
    control_variant = @ab_test.ab_test_variants.find_by(is_control: true)
    return {} unless control_variant

    treatment_variants = @ab_test.ab_test_variants.where(is_control: false)

    power_results = treatment_variants.map do |treatment|
      current_power = estimate_statistical_power(control_variant, treatment)

      # Required sample sizes for conventional effect sizes (80% power).
      required_samples = {
        small_effect: calculate_required_sample_size(control_variant, 0.1),
        medium_effect: calculate_required_sample_size(control_variant, 0.2),
        large_effect: calculate_required_sample_size(control_variant, 0.5)
      }

      {
        variant_name: treatment.name,
        current_power: current_power,
        current_sample_size: treatment.total_visitors,
        required_samples_for_power_80: required_samples,
        days_to_adequate_power: estimate_days_to_power(treatment),
        power_assessment: assess_power_level(current_power)
      }
    end

    {
      control_variant: control_variant.name,
      treatment_power_analysis: power_results,
      overall_test_adequacy: assess_overall_test_adequacy(power_results)
    }
  end

  # Actionable recommendations derived from sample size, significance,
  # relative performance, and test duration.
  def generate_recommendations
    recommendations = []

    # Sample size recommendations
    if total_sample_size_adequate?
      recommendations << create_recommendation(
        'sample_size',
        'sufficient',
        'Sample Size Adequate',
        'Current sample size is sufficient for reliable results.'
      )
    else
      recommendations << create_recommendation(
        'sample_size',
        'insufficient',
        'Increase Sample Size',
        'Current sample size may not be sufficient for reliable statistical conclusions.',
        ['Continue test to gather more data', 'Consider increasing traffic allocation']
      )
    end

    # Statistical significance recommendations
    if @ab_test.statistical_significance_reached?
      if @ab_test.winner_declared?
        recommendations << create_recommendation(
          'implementation',
          'ready',
          'Implement Winning Variant',
          "#{@ab_test.winner_variant.name} has shown statistically significant improvement.",
          ['Deploy winning variant to all traffic', 'Monitor performance post-implementation']
        )
      else
        recommendations << create_recommendation(
          'analysis',
          'review_needed',
          'Review Statistical Results',
          'Significance reached but no clear winner declared.',
          ['Review business impact of variants', 'Consider practical significance vs statistical significance']
        )
      end
    else
      recommendations << create_recommendation(
        'continue_testing',
        'in_progress',
        'Continue Test',
        'More data needed to reach statistical significance.',
        ['Continue test for more time', 'Consider increasing traffic if possible']
      )
    end

    # Performance-based recommendations
    variant_analysis = variant_performance_analysis
    control_performance = variant_analysis[:control_performance]
    best_performer = variant_analysis[:best_performer]

    if best_performer && control_performance
      lift = best_performer[:lift_vs_control]

      # lift is nil when the best performer *is* the control; the old
      # code crashed here with `nil > 20`.
      if lift && lift > 20
        recommendations << create_recommendation(
          'high_impact',
          'significant_improvement',
          'High Impact Variant Identified',
          "#{best_performer[:variant_name]} shows #{lift}% improvement over control.",
          ['Fast-track implementation if significance is reached', 'Analyze successful elements for future tests']
        )
      elsif lift && lift < -10
        recommendations << create_recommendation(
          'performance_issue',
          'negative_impact',
          'Negative Performance Detected',
          "Best variant still underperforms control by #{lift.abs}%.",
          ['Stop test and revert to control', 'Analyze failure factors for future tests']
        )
      end
    end

    # Duration recommendations
    if @ab_test.duration_days > 30
      recommendations << create_recommendation(
        'duration',
        'long_running',
        'Long-Running Test',
        'Test has been running for over 30 days.',
        ['Consider concluding test based on current data', 'Evaluate if external factors may be affecting results']
      )
    end

    recommendations
  end

  # Compares this test with up to five previous completed tests in the
  # same campaign. Empty hash when there is no history.
  def historical_comparison
    campaign = @ab_test.campaign
    previous_tests = campaign.ab_tests.completed.where.not(id: @ab_test.id)
                             .order(created_at: :desc)
                             .limit(5)

    return {} if previous_tests.empty?

    historical_data = previous_tests.map do |test|
      {
        test_name: test.name,
        duration_days: test.duration_days,
        winner_conversion_rate: test.winner_variant&.conversion_rate || 0,
        total_participants: test.ab_test_variants.sum(:total_visitors),
        lift_achieved: calculate_historical_lift(test),
        lessons_learned: extract_lessons_learned(test)
      }
    end

    {
      previous_tests: historical_data,
      average_lift: historical_data.map { |t| t[:lift_achieved] }.sum / historical_data.count,
      success_rate: calculate_historical_success_rate(previous_tests),
      patterns: identify_historical_patterns(historical_data)
    }
  end

  # Segment-level breakdown. Currently built from placeholder data that
  # would integrate with real segment tracking.
  def segments_analysis
    segments = {
      demographic: analyze_demographic_segments,
      behavioral: analyze_behavioral_segments,
      temporal: analyze_temporal_segments,
      acquisition_channel: analyze_channel_segments
    }

    {
      segments_breakdown: segments,
      significant_segments: identify_significant_segments(segments),
      recommendations: generate_segment_recommendations(segments)
    }
  end

  private

  # Blended conversion rate across all variants, as a percentage.
  def calculate_overall_conversion_rate
    total_visitors = @ab_test.ab_test_variants.sum(:total_visitors)
    total_conversions = @ab_test.ab_test_variants.sum(:conversions)

    return 0 if total_visitors == 0

    (total_conversions.to_f / total_visitors * 100).round(2)
  end

  # Letter-grades a variant by raw conversion rate. Half-open ranges
  # close the gaps left by the old 7..9.99-style buckets (a rate of
  # 9.995 used to fall through to 'F').
  def calculate_variant_grade(variant)
    case variant.conversion_rate
    when 10..Float::INFINITY then 'A'
    when 7...10 then 'B'
    when 5...7 then 'C'
    when 3...5 then 'D'
    else 'F'
    end
  end

  # Spread statistics over the per-variant conversion rates.
  def calculate_performance_spread(performance_data)
    conversion_rates = performance_data.map { |v| v[:conversion_rate] }
    max_rate = conversion_rates.max
    min_rate = conversion_rates.min

    {
      max_conversion_rate: max_rate,
      min_conversion_rate: min_rate,
      spread: (max_rate - min_rate).round(2),
      coefficient_of_variation: calculate_coefficient_of_variation(conversion_rates)
    }
  end

  # Two-proportion z-test (pooled), two-tailed, plus Cohen's h effect
  # size. Falls back to a neutral result when a sample is empty or the
  # standard error degenerates to zero.
  def perform_statistical_test(control, treatment)
    p1 = control.conversion_rate / 100.0
    p2 = treatment.conversion_rate / 100.0
    n1 = control.total_visitors
    n2 = treatment.total_visitors

    return default_stat_test if n1 == 0 || n2 == 0

    # Pooled proportion
    p_pool = (control.conversions + treatment.conversions).to_f / (n1 + n2)

    # Standard error
    se = Math.sqrt(p_pool * (1 - p_pool) * (1.0 / n1 + 1.0 / n2))

    return default_stat_test if se == 0

    # Z-score
    z_score = (p2 - p1) / se

    # P-value (two-tailed test)
    p_value = 2 * (1 - normal_cdf(z_score.abs))

    # Effect size (Cohen's h)
    effect_size = 2 * (Math.asin(Math.sqrt(p2)) - Math.asin(Math.sqrt(p1)))

    {
      z_score: z_score.round(3),
      p_value: p_value.round(4),
      significance_level: classify_significance(p_value),
      is_significant: p_value < 0.05,
      effect_size: effect_size.round(3)
    }
  end

  # Neutral "no evidence" result used when a z-test cannot be computed.
  def default_stat_test
    {
      z_score: 0,
      p_value: 1.0,
      significance_level: 'not_significant',
      is_significant: false,
      effect_size: 0
    }
  end

  # Coarse heuristic power estimate from sample size and observed effect.
  def estimate_statistical_power(control, treatment)
    sample_size = [control.total_visitors, treatment.total_visitors].min
    effect_size = (treatment.conversion_rate - control.conversion_rate).abs / 100.0

    case
    when sample_size < 100 then 0.2
    when sample_size < 500 && effect_size > 0.02 then 0.5
    when sample_size < 1000 && effect_size > 0.01 then 0.7
    when sample_size >= 1000 && effect_size > 0.01 then 0.8
    else 0.3
    end
  end

  # Rough per-arm sample size for ~80% power at the observed effect.
  def recommend_sample_size(control, treatment)
    baseline_rate = control.conversion_rate / 100.0
    effect_size = (treatment.conversion_rate - control.conversion_rate).abs / 100.0

    return 0 if effect_size == 0 || baseline_rate == 0

    # Simplified formula - in practice would use more sophisticated calculation
    estimated_n = (16 * baseline_rate * (1 - baseline_rate)) / (effect_size ** 2)
    estimated_n.round
  end

  # Mean of the per-treatment power estimates.
  def calculate_overall_test_power(statistical_results)
    return 0 if statistical_results.empty?

    powers = statistical_results.values.map { |result| result[:power_estimate] }
    (powers.sum / powers.count).round(2)
  end

  # Buckets a CI margin of error into a qualitative precision label.
  def classify_precision(margin_of_error)
    case margin_of_error
    when 0..1 then 'very_high'
    when 1..2 then 'high'
    when 2..5 then 'medium'
    when 5..10 then 'low'
    else 'very_low'
    end
  end

  # Every pair of variants whose confidence intervals overlap.
  def identify_overlapping_intervals(confidence_data)
    overlaps = []

    confidence_data.combination(2).each do |variant1, variant2|
      ci1 = variant1[:confidence_interval]
      ci2 = variant2[:confidence_interval]

      if intervals_overlap?(ci1, ci2)
        overlaps << {
          variant1: variant1[:variant_name],
          variant2: variant2[:variant_name],
          overlap_size: calculate_overlap_size(ci1, ci2)
        }
      end
    end

    overlaps
  end

  # Average margin of error across variants, bucketed.
  def assess_overall_precision(confidence_data)
    avg_margin = confidence_data.map { |v| v[:margin_of_error] }.sum / confidence_data.count

    case avg_margin
    when 0..2 then 'high_precision'
    when 2..5 then 'medium_precision'
    else 'low_precision'
    end
  end

  # Simplified adequacy threshold over the combined visitor count.
  def total_sample_size_adequate?
    total_visitors = @ab_test.ab_test_variants.sum(:total_visitors)
    total_visitors >= 1000 # Simplified threshold
  end

  # Uniform recommendation payload used throughout generate_recommendations.
  def create_recommendation(type, status, title, description, action_items = [])
    {
      type: type,
      status: status,
      title: title,
      description: description,
      action_items: action_items,
      priority: determine_priority(type, status)
    }
  end

  # Priority is keyed on recommendation type only (status is reserved
  # for future use).
  def determine_priority(type, status)
    case type
    when 'implementation', 'high_impact' then 'high'
    when 'performance_issue', 'sample_size' then 'medium'
    else 'low'
    end
  end

  # Winner's percentage lift over control for a past test; 0 when there
  # is no winner, no control, or a zero control rate (which previously
  # produced NaN and crashed on round).
  def calculate_historical_lift(test)
    return 0 unless test.winner_variant

    control = test.ab_test_variants.find_by(is_control: true)
    return 0 unless control
    return 0 unless control.conversion_rate.to_f.positive?

    ((test.winner_variant.conversion_rate - control.conversion_rate) / control.conversion_rate * 100).round(1)
  end

  # Placeholder insight strings for a past test.
  def extract_lessons_learned(test)
    [
      "#{test.test_type} tests typically require #{test.duration_days} days for significance",
      "Winner achieved #{calculate_historical_lift(test)}% lift"
    ]
  end

  # Share of past tests that produced a converting winner, as a percent.
  def calculate_historical_success_rate(previous_tests)
    successful_tests = previous_tests.count { |test| test.winner_variant&.conversion_rate.to_f > 0 }
    return 0 if previous_tests.empty?

    (successful_tests.to_f / previous_tests.count * 100).round(1)
  end

  # Human-readable aggregate patterns over past test data.
  def identify_historical_patterns(historical_data)
    return [] if historical_data.empty?

    patterns = []

    avg_duration = historical_data.map { |t| t[:duration_days] }.sum / historical_data.count
    patterns << "Average test duration: #{avg_duration.round} days"

    avg_lift = historical_data.map { |t| t[:lift_achieved] }.sum / historical_data.count
    patterns << "Average lift achieved: #{avg_lift.round(1)}%"

    patterns
  end

  # Placeholder for demographic segment analysis
  def analyze_demographic_segments
    {
      age_groups: {
        '18-25' => { control_cr: 4.2, treatment_cr: 5.1, significance: 'not_significant' },
        '26-35' => { control_cr: 5.8, treatment_cr: 7.2, significance: 'significant' },
        '36-45' => { control_cr: 6.1, treatment_cr: 6.3, significance: 'not_significant' }
      }
    }
  end

  # Placeholder for behavioral segment analysis
  def analyze_behavioral_segments
    {
      engagement_level: {
        'high' => { control_cr: 8.2, treatment_cr: 9.8, significance: 'significant' },
        'medium' => { control_cr: 5.1, treatment_cr: 5.9, significance: 'marginally_significant' },
        'low' => { control_cr: 2.8, treatment_cr: 3.1, significance: 'not_significant' }
      }
    }
  end

  # Placeholder for temporal segment analysis
  def analyze_temporal_segments
    {
      time_of_day: {
        'morning' => { control_cr: 5.5, treatment_cr: 6.8, significance: 'significant' },
        'afternoon' => { control_cr: 4.9, treatment_cr: 5.2, significance: 'not_significant' },
        'evening' => { control_cr: 6.2, treatment_cr: 7.1, significance: 'marginally_significant' }
      }
    }
  end

  # Placeholder for acquisition channel analysis
  def analyze_channel_segments
    {
      acquisition_channel: {
        'organic' => { control_cr: 7.2, treatment_cr: 8.5, significance: 'significant' },
        'paid_search' => { control_cr: 4.8, treatment_cr: 5.1, significance: 'not_significant' },
        'social' => { control_cr: 3.9, treatment_cr: 4.7, significance: 'marginally_significant' }
      }
    }
  end

  # Collects every segment whose per-segment test reached significance.
  # Segment data is nested segment_type -> dimension -> segment -> stats;
  # the previous version iterated one level too shallow and never matched.
  def identify_significant_segments(segments)
    significant = []

    segments.each do |segment_type, dimensions|
      dimensions.each do |_dimension, groups|
        groups.each do |segment_name, data|
          next unless data.is_a?(Hash) && data[:significance] == 'significant'

          significant << {
            segment_type: segment_type,
            segment_name: segment_name,
            control_cr: data[:control_cr],
            treatment_cr: data[:treatment_cr],
            lift: ((data[:treatment_cr] - data[:control_cr]) / data[:control_cr] * 100).round(1)
          }
        end
      end
    end

    significant
  end

  # Simple targeting suggestion based on the first significant segment.
  def generate_segment_recommendations(segments)
    recommendations = []

    significant_segments = identify_significant_segments(segments)

    if significant_segments.any?
      recommendations << "Consider targeting #{significant_segments.first[:segment_name]} segment for maximum impact"
    end

    recommendations
  end

  # Statistical helper methods

  # Standard normal CDF via the error function.
  def normal_cdf(x)
    (1 + Math.erf(x / Math.sqrt(2))) / 2
  end

  # Buckets a p-value into a qualitative significance label.
  def classify_significance(p_value)
    case p_value
    when 0..0.001 then 'highly_significant'
    when 0.001..0.01 then 'very_significant'
    when 0.01..0.05 then 'significant'
    when 0.05..0.1 then 'marginally_significant'
    else 'not_significant'
    end
  end

  # Coefficient of variation (population std dev / mean) as a percent.
  def calculate_coefficient_of_variation(values)
    return 0 if values.empty?

    mean = values.sum.to_f / values.count
    return 0 if mean == 0

    variance = values.sum { |v| (v - mean) ** 2 } / values.count
    std_dev = Math.sqrt(variance)

    (std_dev / mean * 100).round(2)
  end

  # True when the closed intervals [lo, hi] intersect.
  def intervals_overlap?(ci1, ci2)
    ci1[0] <= ci2[1] && ci2[0] <= ci1[1]
  end

  # Length of the intersection of two intervals (0 when disjoint).
  def calculate_overlap_size(ci1, ci2)
    return 0 unless intervals_overlap?(ci1, ci2)

    overlap_start = [ci1[0], ci2[0]].max
    overlap_end = [ci1[1], ci2[1]].min

    overlap_end - overlap_start
  end

  # Per-arm sample size for a given minimum detectable effect at 80%
  # power / 5% significance (standard normal quantiles hard-coded).
  def calculate_required_sample_size(control_variant, minimum_detectable_effect)
    baseline_rate = control_variant.conversion_rate / 100.0
    return 0 if baseline_rate == 0

    effect_size = minimum_detectable_effect
    z_alpha = 1.96 # 5% significance level
    z_beta = 0.84  # 80% power

    numerator = (z_alpha + z_beta) ** 2 * 2 * baseline_rate * (1 - baseline_rate)
    denominator = effect_size ** 2

    (numerator / denominator).round
  end

  # Days of traffic still needed before the variant reaches the
  # recommended sample size; 'N/A' when no daily traffic is expected.
  def estimate_days_to_power(variant)
    return 'N/A' unless variant.expected_visitors_per_day > 0

    required_sample = recommend_sample_size(
      @ab_test.ab_test_variants.find_by(is_control: true),
      variant
    )

    additional_visitors_needed = [required_sample - variant.total_visitors, 0].max
    days_needed = (additional_visitors_needed / variant.expected_visitors_per_day).ceil

    days_needed > 0 ? days_needed : 0
  end

  # Buckets a power estimate; half-open ranges close the old
  # 0.6..0.79-style gaps (0.795 used to be graded 'insufficient').
  def assess_power_level(power)
    case power
    when 0.8..1.0 then 'adequate'
    when 0.6...0.8 then 'moderate'
    when 0.4...0.6 then 'low'
    else 'insufficient'
    end
  end

  # Overall adequacy from the share of adequately powered treatments.
  # Guards the empty case (previously divided by zero) and uses
  # half-open ranges to close bucket gaps.
  def assess_overall_test_adequacy(power_results)
    total_variants = power_results.count
    return 'inadequate' if total_variants.zero?

    adequate_variants = power_results.count { |result| result[:power_assessment] == 'adequate' }

    case adequate_variants.to_f / total_variants
    when 0.8..1.0 then 'test_ready'
    when 0.5...0.8 then 'mostly_adequate'
    when 0.2...0.5 then 'needs_improvement'
    else 'inadequate'
    end
  end
end
-
2
class ActivityLogger
-
2
include Singleton
-
-
SECURITY_EVENTS = %w[
-
2
authentication_failure
-
authorization_failure
-
suspicious_activity
-
account_locked
-
password_reset
-
admin_action
-
data_export
-
bulk_operation
-
system_error
-
repeated_errors
-
unusual_error_pattern
-
].freeze
-
-
PERFORMANCE_EVENTS = %w[
-
2
slow_request
-
database_slow_query
-
cache_miss
-
api_timeout
-
background_job_failure
-
].freeze
-
-
2
class << self
-
2
delegate :log, :security, :performance, :audit, to: :instance
-
end
-
-
2
def initialize
-
2
@logger = Rails.logger
-
2
@security_logger = Rails.application.config.respond_to?(:security_logger) ?
-
Rails.application.config.security_logger :
-
2
Rails.logger
-
end
-
-
# General activity logging
-
2
def log(level, message, context = {})
-
9
structured_log = build_log_entry(message, context)
-
9
@logger.send(level, structured_log.to_json)
-
-
# Also log to database if it's an important event
-
9
persist_to_database(level, message, context) if should_persist?(level, context)
-
end
-
-
# Security-specific logging
-
2
def security(event_type, message, context = {})
-
return unless SECURITY_EVENTS.include?(event_type.to_s)
-
-
context[:event_type] = event_type
-
context[:security_event] = true
-
-
@security_logger.tagged('SECURITY', event_type.to_s.upcase) do
-
@security_logger.warn build_log_entry(message, context).to_json
-
end
-
-
# Trigger notifications for critical security events
-
notify_security_event(event_type, message, context) if critical_security_event?(event_type)
-
-
# Instrument for monitoring
-
ActiveSupport::Notifications.instrument('suspicious_activity.security',
-
event_type: event_type,
-
message: message,
-
context: context
-
)
-
end
-
-
# Performance logging
-
2
def performance(metric_type, message, context = {})
-
return unless PERFORMANCE_EVENTS.include?(metric_type.to_s)
-
-
context[:metric_type] = metric_type
-
context[:performance_event] = true
-
-
@logger.tagged('PERFORMANCE', metric_type.to_s.upcase) do
-
@logger.info build_log_entry(message, context).to_json
-
end
-
-
# Send to monitoring service
-
send_to_monitoring(metric_type, context) if Rails.env.production?
-
end
-
-
# Audit logging for compliance
-
2
def audit(action, resource, changes = {}, user = nil)
-
audit_entry = {
-
action: action,
-
resource_type: resource.class.name,
-
resource_id: resource.id,
-
changes: sanitize_changes(changes),
-
user_id: user&.id,
-
user_email: user&.email_address,
-
timestamp: Time.current.iso8601
-
}
-
-
@logger.tagged('AUDIT') do
-
@logger.info audit_entry.to_json
-
end
-
-
# Store audit trail in database
-
if defined?(AdminAuditLog) && user
-
AdminAuditLog.create!(
-
user: user,
-
action: action,
-
auditable: resource,
-
change_details: sanitize_changes(changes).to_json,
-
ip_address: Current.ip_address,
-
user_agent: Current.user_agent
-
)
-
end
-
end
-
-
2
private
-
-
2
def build_log_entry(message, context = {})
-
{
-
9
timestamp: Time.current.iso8601,
-
level: context[:level] || 'info',
-
message: message,
-
request_id: Current.request_id || Thread.current[:request_id],
-
user_id: Current.user&.id,
-
ip_address: Current.ip_address,
-
user_agent: Current.user_agent,
-
session_id: Current.session_id,
-
context: context.except(:level)
-
}.compact
-
end
-
-
2
def should_persist?(level, context)
-
# Persist warnings, errors, and security events
-
9
%w[warn error fatal].include?(level.to_s) ||
-
context[:security_event] ||
-
context[:audit_event]
-
end
-
-
2
def persist_to_database(level, message, context)
-
return unless Current.user
-
-
Activity.create!(
-
user: Current.user,
-
action: context[:action] || 'system_log',
-
controller: context[:controller] || 'system',
-
metadata: {
-
message: message,
-
level: level,
-
context: context
-
},
-
suspicious: context[:security_event] || level.to_s == 'error'
-
)
-
rescue => e
-
Rails.logger.error "Failed to persist log to database: #{e.message}"
-
end
-
-
2
def critical_security_event?(event_type)
-
%w[suspicious_activity account_locked authorization_failure system_error repeated_errors].include?(event_type.to_s)
-
end
-
-
2
def notify_security_event(event_type, message, context)
-
# Queue notification job
-
if defined?(SecurityNotificationJob)
-
SecurityNotificationJob.perform_later(
-
event_type: event_type,
-
message: message,
-
context: context
-
)
-
end
-
end
-
-
2
def send_to_monitoring(metric_type, context)
-
# Integration with monitoring services like DataDog, New Relic, etc.
-
# This is a placeholder for actual monitoring integration
-
Rails.logger.info "Monitoring metric: #{metric_type} - #{context.to_json}"
-
end
-
-
2
def sanitize_changes(changes)
-
# Remove sensitive data from audit logs
-
sensitive_fields = %w[password password_confirmation password_digest token secret]
-
-
changes.deep_dup.tap do |sanitized|
-
sensitive_fields.each do |field|
-
sanitized.delete(field)
-
sanitized.delete(field.to_sym)
-
end
-
end
-
end
-
-
# Error pattern detection methods
-
2
def self.track_error_pattern(error_type, context = {})
-
return unless Rails.env.production?
-
-
# Track error patterns by IP, user, and error type
-
ip_key = "error_pattern_ip_#{context[:ip_address]}_#{error_type}"
-
user_key = "error_pattern_user_#{context[:user_id]}_#{error_type}" if context[:user_id]
-
global_key = "error_pattern_global_#{error_type}"
-
-
# Increment counters
-
ip_count = Rails.cache.increment(ip_key, 1, expires_in: 1.hour) || 1
-
user_count = Rails.cache.increment(user_key, 1, expires_in: 1.hour) || 1 if user_key
-
global_count = Rails.cache.increment(global_key, 1, expires_in: 1.hour) || 1
-
-
# Check for suspicious patterns
-
check_error_patterns(error_type, ip_count, user_count, global_count, context)
-
end
-
-
2
# Raise security events when hourly error counters cross abuse
# thresholds: >20 from one IP, >15 from one user, >100 globally.
# Counters may be nil (e.g. no user id) and are then skipped.
def self.check_error_patterns(error_type, ip_count, user_count, global_count, context)
  checks = [
    # [count, threshold, event type, message, pattern label]
    [ip_count, 20, 'repeated_errors', "Excessive #{error_type} errors from IP", 'ip_based'],
    [user_count, 15, 'repeated_errors', "Excessive #{error_type} errors from user", 'user_based'],
    [global_count, 100, 'unusual_error_pattern', "Unusual spike in #{error_type} errors globally", 'global_spike']
  ]

  checks.each do |count, threshold, event_type, message, pattern|
    next unless count && count > threshold

    instance.security(event_type,
      message,
      context.merge(error_count: count, pattern_type: pattern)
    )
  end
end
-
-
2
# Return user-facing recovery steps for a given error type.
# Unknown types get generic advice. +context+ is accepted for interface
# compatibility (currently unused).
def self.error_recovery_suggestions(error_type, context = {})
  suggestions_by_type = {
    'not_found' => [
      "Check URL for typos",
      "Use site navigation",
      "Search for content",
      "Contact support if needed"
    ],
    'unprocessable_entity' => [
      "Review form data for completeness",
      "Check data format requirements",
      "Refresh session if expired",
      "Contact support for permission issues"
    ],
    'internal_server_error' => [
      "Wait a few minutes and try again",
      "Check system status page",
      "Try different browser or device",
      "Contact support if problem persists"
    ]
  }

  suggestions_by_type.fetch(error_type.to_s) do
    [
      "Refresh the page",
      "Try again in a few minutes",
      "Contact support if issue continues"
    ]
  end
end
-
end
-
# Builds per-user activity reports (summary, security events, access
# patterns, performance, recommendations) over a date window, backed by
# the user's Activity records. Used both interactively and by the
# recurring daily-report job.
class ActivityReportService
  attr_reader :user, :start_date, :end_date

  # @param user       [User] the subject of the report
  # @param start_date [Time] window start (expanded to beginning_of_day)
  # @param end_date   [Time] window end (expanded to end_of_day)
  def initialize(user, start_date: 30.days.ago, end_date: Time.current)
    @user = user
    @start_date = start_date.beginning_of_day
    @end_date = end_date.end_of_day
  end

  # Class method for recurring job.
  # Generates a 1-day report for every admin, optionally emails it, and
  # logs completion per admin.
  def self.generate_daily_reports
    Rails.logger.info "Generating daily activity reports..."

    # Generate reports for all admin users
    User.admin.find_each do |admin|
      report = new(admin, start_date: 1.day.ago).generate_report

      # Send email if configured
      if Rails.application.config.activity_alerts.enabled && admin.notification_email?
        AdminMailer.daily_activity_report(admin, report).deliver_later
      end

      # Log completion
      ActivityLogger.log(:info, "Daily report generated for admin", {
        admin_id: admin.id,
        total_activities: report[:summary][:total_activities]
      })
    end

    Rails.logger.info "Daily activity reports completed."
  end

  # Assemble the full report hash; each section is produced by a
  # dedicated method below.
  def generate_report
    {
      summary: generate_summary,
      activity_breakdown: activity_breakdown,
      suspicious_activities: suspicious_activity_summary,
      performance_metrics: performance_metrics,
      security_events: security_events,
      access_patterns: access_patterns,
      device_usage: device_usage,
      recommendations: generate_recommendations
    }
  end

  # High-level counts for the window: totals, suspicious/failed counts,
  # distinct IPs and sessions, busiest day and daily average.
  def generate_summary
    activities = user_activities

    {
      total_activities: activities.count,
      date_range: {
        start: start_date,
        end: end_date
      },
      most_active_day: most_active_day(activities),
      average_daily_activities: average_daily_activities(activities),
      suspicious_count: activities.suspicious.count,
      failed_requests: activities.failed_requests.count,
      unique_ips: activities.distinct.count(:ip_address),
      unique_sessions: activities.distinct.count(:session_id)
    }
  end

  # Per controller#action counts sorted descending, each annotated with
  # its percentage of the window's total activity.
  def activity_breakdown
    activities = user_activities

    # Group by controller and action
    breakdown = activities
      .group(:controller, :action)
      .count
      .map { |k, v| { controller: k[0], action: k[1], count: v } }
      .sort_by { |item| -item[:count] }

    # Add percentage
    total = activities.count
    breakdown.each do |item|
      item[:percentage] = ((item[:count].to_f / total) * 100).round(2)
    end

    breakdown
  end

  # Detail every suspicious activity in the window plus pattern
  # aggregates (by reason / hour / IP). Empty-safe.
  def suspicious_activity_summary
    suspicious = user_activities.suspicious

    return { count: 0, events: [] } if suspicious.empty?

    {
      count: suspicious.count,
      events: suspicious.map do |activity|
        {
          occurred_at: activity.occurred_at,
          action: activity.full_action,
          ip_address: activity.ip_address,
          reasons: activity.metadata&.[]('suspicious_reasons') || [],
          user_agent: activity.user_agent
        }
      end,
      patterns: analyze_suspicious_patterns(suspicious)
    }
  end

  # Response-time statistics for activities that recorded one.
  # NOTE(review): response_time is multiplied by 1000 throughout —
  # assumed to be stored in seconds, reported in ms; confirm with schema.
  def performance_metrics
    activities = user_activities.where.not(response_time: nil)

    return {} if activities.empty?

    response_times = activities.pluck(:response_time)

    {
      average_response_time: (response_times.sum / response_times.size * 1000).round(2),
      median_response_time: (median(response_times) * 1000).round(2),
      slowest_actions: slowest_actions(activities),
      response_time_distribution: response_time_distribution(response_times)
    }
  end

  # Collect security-relevant events in the window: failed logins,
  # authorization failures, and any account lockout.
  def security_events
    events = []

    # Failed login attempts
    failed_logins = user_activities
      .where(controller: 'sessions', action: 'create')
      .failed_requests

    if failed_logins.any?
      events << {
        type: 'failed_login_attempts',
        count: failed_logins.count,
        last_attempt: failed_logins.maximum(:occurred_at),
        ip_addresses: failed_logins.distinct.pluck(:ip_address)
      }
    end

    # Authorization failures
    # NOTE(review): relies on metadata being a serialized text column a
    # LIKE can search — verify against the Activity schema.
    auth_failures = user_activities
      .where("metadata LIKE ?", '%NotAuthorizedError%')

    if auth_failures.any?
      events << {
        type: 'authorization_failures',
        count: auth_failures.count,
        resources: auth_failures.map { |a| a.full_action }.uniq
      }
    end

    # Account lockouts (only reported if the lock happened in-window)
    if user.locked_at.present? && user.locked_at >= start_date
      events << {
        type: 'account_locked',
        locked_at: user.locked_at,
        reason: user.lock_reason
      }
    end

    events
  end

  # When and what the user accesses: hourly/daily distributions, top 10
  # paths, and first/last/peak access markers.
  def access_patterns
    activities = user_activities

    # Group by hour of day (loads records; group_by is in-memory)
    hourly_pattern = activities
      .group_by { |a| a.occurred_at.hour }
      .transform_values(&:count)
      .sort.to_h

    # Group by day of week
    daily_pattern = activities
      .group_by { |a| a.occurred_at.strftime('%A') }
      .transform_values(&:count)

    # Most accessed resources
    top_resources = activities
      .group(:request_path)
      .count
      .sort_by { |_, count| -count }
      .first(10)
      .to_h

    {
      hourly_distribution: hourly_pattern,
      daily_distribution: daily_pattern,
      top_resources: top_resources,
      access_times: {
        first_access: activities.minimum(:occurred_at),
        last_access: activities.maximum(:occurred_at),
        most_active_hour: hourly_pattern.max_by { |_, v| v }&.first,
        most_active_day: daily_pattern.max_by { |_, v| v }&.first
      }
    }
  end

  # Device/browser/OS breakdown for the window.
  def device_usage
    activities = user_activities

    {
      devices: activities.group(:device_type).count,
      browsers: activities.group(:browser_name).count,
      operating_systems: activities.group(:os_name).count,
      unique_user_agents: activities.distinct.count(:user_agent)
    }
  end

  private

  # Memoized base relation: the user's activities inside the window.
  def user_activities
    @user_activities ||= user.activities
      .where(occurred_at: start_date..end_date)
      .includes(:user)
  end

  # Date with the highest activity count, or nil for an empty window.
  def most_active_day(activities)
    return nil if activities.empty?

    activities
      .group_by { |a| a.occurred_at.to_date }
      .max_by { |_, acts| acts.count }
      &.first
  end

  # Mean activities per day over the (ceil'd) number of days in range.
  def average_daily_activities(activities)
    days = ((end_date - start_date) / 1.day).ceil
    (activities.count.to_f / days).round(2)
  end

  # Aggregate suspicious activities by flag reason, hour, and top-5 IPs.
  def analyze_suspicious_patterns(suspicious_activities)
    patterns = {}

    # Group by reason
    reasons = suspicious_activities
      .flat_map { |a| a.metadata&.[]('suspicious_reasons') || [] }
      .tally

    patterns[:by_reason] = reasons

    # Time-based patterns
    patterns[:by_hour] = suspicious_activities
      .group_by { |a| a.occurred_at.hour }
      .transform_values(&:count)

    # IP-based patterns
    patterns[:by_ip] = suspicious_activities
      .group(:ip_address)
      .count
      .sort_by { |_, count| -count }
      .first(5)
      .to_h

    patterns
  end

  # Ten slowest requests with their timing (ms), time, and path.
  def slowest_actions(activities)
    activities
      .order(response_time: :desc)
      .limit(10)
      .map do |activity|
        {
          action: activity.full_action,
          response_time_ms: (activity.response_time * 1000).round(2),
          occurred_at: activity.occurred_at,
          path: activity.request_path
        }
      end
  end

  # Bucket response times (ms) into four latency bands.
  def response_time_distribution(times)
    return {} if times.empty?

    # Convert to milliseconds
    times_ms = times.map { |t| t * 1000 }

    {
      under_100ms: times_ms.count { |t| t < 100 },
      '100_500ms': times_ms.count { |t| t >= 100 && t < 500 },
      '500_1000ms': times_ms.count { |t| t >= 500 && t < 1000 },
      over_1000ms: times_ms.count { |t| t >= 1000 }
    }
  end

  # Standard median: averages the two middle elements for even lengths.
  def median(array)
    return nil if array.empty?

    sorted = array.sort
    len = sorted.length
    (sorted[(len - 1) / 2] + sorted[len / 2]) / 2.0
  end

  # Heuristic advice derived from the window's data: security flags for
  # suspicious volume, night-time access, many IPs; perf flag for slow
  # request ratio.
  def generate_recommendations
    recommendations = []
    activities = user_activities

    # Check for suspicious activity patterns
    if activities.suspicious.count > 5
      recommendations << {
        type: 'security',
        priority: 'high',
        message: 'Multiple suspicious activities detected. Review security settings and consider enabling two-factor authentication.'
      }
    end

    # Check for unusual access patterns (00:00-05:59 considered night)
    night_activities = activities.select { |a| a.occurred_at.hour.between?(0, 5) }
    if night_activities.count > activities.count * 0.2
      recommendations << {
        type: 'security',
        priority: 'medium',
        message: 'Significant activity during unusual hours detected. Verify these accesses were authorized.'
      }
    end

    # Check for multiple IP addresses
    ip_count = activities.distinct.count(:ip_address)
    if ip_count > 10
      recommendations << {
        type: 'security',
        priority: 'medium',
        message: "Activity from #{ip_count} different IP addresses. Consider reviewing access locations."
      }
    end

    # Performance recommendations (>2s responses count as slow)
    slow_requests = activities.where('response_time > ?', 2.0)
    if slow_requests.count > activities.count * 0.1
      recommendations << {
        type: 'performance',
        priority: 'low',
        message: 'More than 10% of requests are slow. Consider optimizing frequently accessed pages.'
      }
    end

    recommendations
  end
end
-
# Simple facade over Journey::BrandIntegrationService for brand-journey
# integration features. All orchestration methods delegate to a single
# service entry point with a fixed operation symbol.
class BrandJourneyOrchestrator
  def self.generate_brand_aware_suggestions(journey:, user: nil, **options)
    orchestrate(:generate_suggestions, journey: journey, user: user, **options)
  end

  def self.validate_journey_brand_compliance(journey:, user: nil, **options)
    orchestrate(:validate_content, journey: journey, user: user, **options)
  end

  def self.enhance_journey_compliance(journey:, user: nil, **options)
    orchestrate(:auto_enhance_compliance, journey: journey, user: user, **options)
  end

  def self.analyze_brand_performance(journey:, user: nil, **options)
    orchestrate(:analyze_brand_performance, journey: journey, user: user, **options)
  end

  def self.sync_with_brand_updates(journey:, user: nil, **options)
    orchestrate(:sync_brand_updates, journey: journey, user: user, **options)
  end

  # Health check has its own service entry point (no operation symbol).
  def self.check_integration_health(journey:, user: nil)
    service = Journey::BrandIntegrationService.new(journey: journey, user: user)
    service.integration_health_check
  end

  # Convenience: average per-step compliance without a full analysis.
  # Steps scoring >= 0.7 count as compliant.
  def self.quick_compliance_check(journey:)
    return { score: 1.0, message: 'No brand associated' } unless journey.brand.present?

    scores = journey.journey_steps.map(&:quick_compliance_score)

    # Fix: a journey with a brand but no steps previously raised
    # ZeroDivisionError on the average below.
    if scores.empty?
      return { score: 0.0, compliant_steps: 0, total_steps: 0, compliance_rate: 0.0 }
    end

    compliant_steps = scores.count { |s| s >= 0.7 }
    average_score = scores.sum.to_f / scores.length

    {
      score: average_score.round(3),
      compliant_steps: compliant_steps,
      total_steps: scores.length,
      compliance_rate: (compliant_steps.to_f / scores.length * 100).round(1)
    }
  end

  # Convenience: how "wired up" this journey is to its brand, based on
  # four boolean indicators. >= 0.5 of indicators true => integrated.
  def self.brand_integration_status(journey:)
    return { integrated: false, reason: 'No brand associated' } unless journey.brand.present?

    brand = journey.brand
    integration_indicators = {
      has_messaging_framework: brand.messaging_framework.present?,
      has_active_guidelines: brand.brand_guidelines.active.any?,
      has_voice_attributes: brand.brand_voice_attributes.present?,
      recent_compliance_checks: journey.journey_insights.brand_compliance.recent(7).any?
    }

    integration_score = integration_indicators.values.count(true).to_f / integration_indicators.length

    {
      integrated: integration_score >= 0.5,
      integration_score: integration_score.round(2),
      indicators: integration_indicators,
      status: integration_score >= 0.8 ? 'fully_integrated' :
              integration_score >= 0.5 ? 'partially_integrated' : 'not_integrated'
    }
  end

  # Shared delegation used by every orchestration method above.
  def self.orchestrate(operation, journey:, user: nil, **options)
    service = Journey::BrandIntegrationService.new(journey: journey, user: user)
    service.orchestrate_brand_journey_flow(operation: operation, **options)
  end
  private_class_method :orchestrate
end
-
module Branding
-
class AnalysisService
-
attr_reader :brand, :content, :options, :visual_assets

# Constants for analysis configuration
MAX_CONTENT_LENGTH = 50_000  # hard cap on aggregated content length (truncated beyond this)
CHUNK_SIZE = 4_000           # target max characters per LLM prompt chunk
MIN_CONTENT_LENGTH = 100     # analysis is refused below this content length
DEFAULT_CONFIDENCE_THRESHOLD = 0.7

# Analysis categories
# Allowed vocabulary per voice dimension. Index 2 of each list is the
# neutral middle option used as fallback for unknown LLM levels.
VOICE_DIMENSIONS = {
  formality: %w[very_formal formal neutral casual very_casual],
  energy: %w[high_energy energetic balanced calm subdued],
  warmth: %w[very_warm warm neutral cool professional],
  authority: %w[commanding authoritative balanced approachable peer_level]
}.freeze

# Recognized tone labels; anything outside this list is discarded or
# replaced by 'professional' during validation.
TONE_ATTRIBUTES = %w[
  professional friendly authoritative conversational playful
  serious inspiring educational empathetic bold innovative
  trustworthy approachable technical sophisticated
].freeze

# Recognized writing-style labels; fallback is 'informative'.
WRITING_STYLES = %w[
  descriptive concise technical storytelling analytical
  persuasive informative instructional narrative expository
].freeze
-
-
# @param brand   [Brand] brand whose content/assets are analyzed
# @param content [String, nil] explicit content to analyze; when nil the
#   service aggregates text from the brand's processed assets
# @param options [Hash] supports :llm_provider to override model choice
def initialize(brand, content = nil, options = {})
  @brand = brand
  @options = options
  @content = content || aggregate_brand_content
  # Visual assets are analyzed separately from text content.
  @visual_assets = brand.brand_assets.where(asset_type: ['logo', 'image', 'visual'])
  @llm_provider = options[:llm_provider] || determine_best_provider
end
-
-
# Kick off an asynchronous brand analysis.
#
# Creates a BrandAnalysis record in "processing" state and enqueues
# BrandAnalysisJob to do the heavy lifting (see perform_analysis).
#
# @return [Hash] { success: true, analysis_id: } on enqueue, or
#   { success: false, error: } when content is insufficient or
#   record creation/enqueue raises.
def analyze
  if content.blank? || content.length < MIN_CONTENT_LENGTH
    return { success: false, error: "Insufficient content for analysis" }
  end

  analysis = brand.brand_analyses.create!(
    analysis_status: "processing",
    analysis_data: { started_at: Time.current }
  )
  BrandAnalysisJob.perform_later(analysis.id)

  { success: true, analysis_id: analysis.id }
rescue StandardError => e
  Rails.logger.error "Brand analysis error: #{e.message}\n#{e.backtrace.join("\n")}"
  { success: false, error: e.message }
end
-
-
# Execute the full multi-stage analysis pipeline for a persisted
# BrandAnalysis record (invoked by BrandAnalysisJob).
#
# @param analysis [BrandAnalysis] record created by #analyze
# @return [Boolean] true on success; false after marking the record
#   failed on any StandardError
def perform_analysis(analysis)
  analysis.mark_as_processing!

  begin
    # Multi-stage analysis with chunking for large content
    content_chunks = chunk_content(@content)

    # Stage 1: Voice and tone analysis across all chunks
    voice_attrs = analyze_voice_and_tone_comprehensive(content_chunks)

    # Stage 2: Brand values extraction with context
    brand_vals = extract_brand_values_with_context(content_chunks)

    # Stage 3: Messaging pillars with examples
    messaging_pillars = extract_messaging_pillars_detailed(content_chunks)

    # Stage 4: Comprehensive guidelines extraction
    guidelines = extract_guidelines_comprehensive(content_chunks)

    # Stage 5: Visual brand analysis (if applicable)
    visual_guide = analyze_visual_brand_elements

    # Stage 6: Cross-reference and validate findings
    validated_data = cross_validate_findings(
      voice_attrs, brand_vals, messaging_pillars, guidelines
    )

    # Stage 7: Calculate comprehensive confidence score
    confidence = calculate_comprehensive_confidence_score(validated_data)

    # Update analysis with all findings (single write so a crash leaves
    # the record in "processing", not half-updated)
    analysis.update!(
      voice_attributes: validated_data[:voice_attributes],
      brand_values: validated_data[:brand_values],
      messaging_pillars: validated_data[:messaging_pillars],
      extracted_rules: validated_data[:guidelines],
      visual_guidelines: visual_guide,
      confidence_score: confidence[:overall],
      analysis_data: analysis.analysis_data.merge(
        confidence_breakdown: confidence[:breakdown],
        analysis_metadata: {
          content_length: @content.length,
          chunks_analyzed: content_chunks.size,
          visual_assets_analyzed: @visual_assets.count,
          llm_provider: @llm_provider,
          completed_at: Time.current
        }
      ),
      analysis_status: "completed",
      analyzed_at: Time.current
    )

    # Create actionable guidelines and frameworks from the findings
    create_comprehensive_guidelines(analysis)
    update_messaging_framework_detailed(analysis)
    generate_brand_consistency_report(analysis)

    true
  rescue StandardError => e
    Rails.logger.error "Analysis processing error: #{e.message}\n#{e.backtrace.join("\n")}"
    analysis.mark_as_failed!("Analysis failed: #{e.message}")
    false
  end
end
-
-
private
-
-
# Build the analysis corpus from the brand's processed text assets.
#
# Sources are collected in three priority tiers (guidelines > marketing
# materials > everything else non-visual), tagged with their filename,
# concatenated, and truncated to MAX_CONTENT_LENGTH. Side effect: caches
# the ordered source list in @content_sources.
#
# @return [String] combined, possibly truncated, content
def aggregate_brand_content
  # Prioritize content by type and recency
  content_sources = []

  # Priority 1: Brand guidelines and style guides
  guidelines_content = brand.brand_assets
    .where(asset_type: ['style_guide', 'brand_guidelines', 'voice_guide'])
    .processed
    .pluck(:extracted_text, :metadata)

  content_sources.concat(
    guidelines_content.map { |text, meta|
      { content: text, priority: 1, source: meta['filename'] || 'Brand Guidelines' }
    }
  )

  # Priority 2: Marketing materials and messaging docs
  marketing_content = brand.brand_assets
    .where(asset_type: ['marketing_material', 'messaging_doc', 'presentation'])
    .processed
    .pluck(:extracted_text, :metadata)

  content_sources.concat(
    marketing_content.map { |text, meta|
      { content: text, priority: 2, source: meta['filename'] || 'Marketing Material' }
    }
  )

  # Priority 3: Website content and other materials (everything not
  # already covered above and not a visual asset)
  other_content = brand.brand_assets
    .where.not(asset_type: ['style_guide', 'brand_guidelines', 'voice_guide',
                            'marketing_material', 'messaging_doc', 'presentation',
                            'logo', 'image', 'visual'])
    .processed
    .pluck(:extracted_text, :metadata)

  content_sources.concat(
    other_content.map { |text, meta|
      { content: text, priority: 3, source: meta['filename'] || 'Other Content' }
    }
  )

  # Sort by priority and combine
  @content_sources = content_sources.sort_by { |s| s[:priority] }

  # Combine with priority weighting; each source is labeled so the LLM
  # can attribute findings
  combined_content = @content_sources.map { |source|
    "\n\n[Source: #{source[:source]}]\n#{source[:content]}"
  }.join("\n\n")

  # Truncate if too long
  combined_content.truncate(MAX_CONTENT_LENGTH)
end
-
-
# Split +content+ into roughly CHUNK_SIZE-character pieces on sentence
# boundaries so each piece fits inside one LLM prompt. A lone sentence
# longer than CHUNK_SIZE stays intact in its own (oversized) chunk.
def chunk_content(content)
  return [content] if content.length <= CHUNK_SIZE

  chunks = []
  buffer = ""

  # Split after sentence-ending punctuation followed by whitespace.
  content.split(/(?<=[.!?])\s+/).each do |sentence|
    would_overflow = (buffer.length + sentence.length) > CHUNK_SIZE
    if would_overflow && buffer.present?
      chunks << buffer.strip
      buffer = sentence
    else
      buffer += " #{sentence}"
    end
  end

  chunks << buffer.strip if buffer.present?
  chunks
end
-
-
# Pick the strongest available LLM model based on which API keys are
# configured, falling back to the cheapest option.
def determine_best_provider
  return 'claude-3-opus-20240229' if ENV['ANTHROPIC_API_KEY'].present? # Best for nuanced brand analysis
  return 'gpt-4-turbo-preview' if ENV['OPENAI_API_KEY'].present?       # Good for structured output

  'gpt-3.5-turbo' # Fallback option
end
-
-
# Run voice/tone analysis chunk-by-chunk through the LLM, then merge the
# per-chunk findings into one aggregate voice profile.
def analyze_voice_and_tone_comprehensive(content_chunks)
  total = content_chunks.size

  per_chunk = content_chunks.each_with_index.map do |chunk, index|
    prompt = build_comprehensive_voice_prompt(chunk, index, total)
    parse_voice_response_safe(llm_service.analyze(prompt, json_response: true))
  end

  # Aggregate and reconcile findings across chunks
  aggregate_voice_attributes(per_chunk)
end
-
-
# Build the LLM prompt for voice/tone analysis of one content chunk.
# The expected JSON shape mirrors what parse_voice_response_safe and the
# validate_* helpers consume; allowed vocabularies are interpolated from
# the class constants so prompt and validation cannot drift apart.
def build_comprehensive_voice_prompt(content, chunk_index, total_chunks)
  <<~PROMPT
    You are an expert brand voice analyst. Analyze this brand content (chunk #{chunk_index + 1} of #{total_chunks}) for voice and tone characteristics.

    Content:
    #{content}

    Provide a detailed analysis in the following JSON structure:
    {
      "formality": {
        "level": "one of: #{VOICE_DIMENSIONS[:formality].join(', ')}",
        "score": 0.0-1.0,
        "evidence": ["specific phrases showing formality level"],
        "consistency": 0.0-1.0
      },
      "energy": {
        "level": "one of: #{VOICE_DIMENSIONS[:energy].join(', ')}",
        "score": 0.0-1.0,
        "evidence": ["specific phrases showing energy level"]
      },
      "warmth": {
        "level": "one of: #{VOICE_DIMENSIONS[:warmth].join(', ')}",
        "score": 0.0-1.0,
        "evidence": ["specific phrases showing warmth level"]
      },
      "authority": {
        "level": "one of: #{VOICE_DIMENSIONS[:authority].join(', ')}",
        "score": 0.0-1.0,
        "evidence": ["specific phrases showing authority level"]
      },
      "tone": {
        "primary": "main tone from: #{TONE_ATTRIBUTES.join(', ')}",
        "secondary": ["2-3 secondary tones"],
        "avoided": ["tones that are notably absent"],
        "consistency": 0.0-1.0
      },
      "style": {
        "writing": "primary style from: #{WRITING_STYLES.join(', ')}",
        "sentence_structure": "simple/compound/complex/varied",
        "vocabulary": "basic/intermediate/advanced/technical/mixed",
        "paragraph_length": "short/medium/long/varied",
        "active_passive_ratio": 0.0-1.0
      },
      "personality_traits": ["5-7 key personality descriptors"],
      "linguistic_patterns": {
        "common_phrases": ["frequently used phrases"],
        "power_words": ["impactful words used"],
        "transitions": ["common transition phrases"],
        "openings": ["typical sentence/paragraph starters"],
        "closings": ["typical ending patterns"]
      },
      "emotional_tone": {
        "primary_emotion": "dominant emotional undertone",
        "emotional_range": "narrow/moderate/wide",
        "positivity_ratio": 0.0-1.0
      }
    }

    Be specific and cite actual examples from the text. Focus on patterns, not isolated instances.
  PROMPT
end
-
-
# Parse an LLM voice-analysis response into a validated attribute hash,
# degrading to default_voice_attributes on any failure.
def parse_voice_response_safe(response)
  return default_voice_attributes if response.blank?

  # NOTE(review): intentional best-effort fallback — if the response is
  # not valid JSON we keep the raw value; String#[] lookups below then
  # yield nil/substrings and each validator falls back to its default.
  parsed = JSON.parse(response) rescue response

  # Validate and clean the response field by field
  {
    formality: validate_dimension(parsed['formality'], 'formality'),
    energy: validate_dimension(parsed['energy'], 'energy'),
    warmth: validate_dimension(parsed['warmth'], 'warmth'),
    authority: validate_dimension(parsed['authority'], 'authority'),
    tone: validate_tone(parsed['tone']),
    style: validate_style(parsed['style']),
    personality_traits: Array(parsed['personality_traits']).first(7),
    linguistic_patterns: validate_patterns(parsed['linguistic_patterns']),
    emotional_tone: validate_emotional_tone(parsed['emotional_tone'])
  }
rescue => e
  Rails.logger.error "Voice parsing error: #{e.message}"
  default_voice_attributes
end
-
-
# Normalize one voice-dimension payload from the LLM.
#
# Falls back to the neutral middle vocabulary entry when the reported
# level is unknown, keeps at most 5 evidence items, and clamps the score
# into 0.0..1.0 — the previous `[x, 1.0].min` only capped the top, so a
# negative model score leaked through.
#
# @param dimension_data [Hash, Object] raw LLM payload for one dimension
# @param dimension_name [String] key into VOICE_DIMENSIONS
def validate_dimension(dimension_data, dimension_name)
  return default_dimension(dimension_name) unless dimension_data.is_a?(Hash)

  allowed_levels = VOICE_DIMENSIONS[dimension_name.to_sym]
  level = dimension_data['level']
  level = allowed_levels[2] unless allowed_levels.include?(level) # index 2 == neutral option

  {
    level: level,
    score: dimension_data['score'].to_f.clamp(0.0, 1.0),
    evidence: Array(dimension_data['evidence']).first(5),
    consistency: dimension_data['consistency']&.to_f || 0.7
  }
end
-
-
# Normalize the tone section of an LLM voice analysis. Unknown primary
# tones fall back to 'professional'; secondary tones are filtered to the
# known vocabulary and capped at 3.
def validate_tone(tone_data)
  return default_tone unless tone_data.is_a?(Hash)

  primary = tone_data['primary']
  primary = 'professional' unless TONE_ATTRIBUTES.include?(primary)

  {
    primary: primary,
    secondary: Array(tone_data['secondary']).select { |t| TONE_ATTRIBUTES.include?(t) }.first(3),
    avoided: Array(tone_data['avoided']),
    consistency: tone_data['consistency']&.to_f || 0.7
  }
end
-
-
# Normalize the writing-style section of an LLM voice analysis,
# substituting sensible defaults for any missing or unknown fields.
def validate_style(style_data)
  return default_style unless style_data.is_a?(Hash)

  writing = style_data['writing']
  writing = 'informative' unless WRITING_STYLES.include?(writing)

  {
    writing: writing,
    sentence_structure: style_data['sentence_structure'] || 'varied',
    vocabulary: style_data['vocabulary'] || 'intermediate',
    paragraph_length: style_data['paragraph_length'] || 'medium',
    active_passive_ratio: style_data['active_passive_ratio']&.to_f || 0.8
  }
end
-
-
# Merge per-chunk voice analyses into one profile. Chunks whose parse
# fell back to the default structure are excluded first; if nothing
# usable remains, the default profile is returned.
def aggregate_voice_attributes(chunk_analyses)
  usable = chunk_analyses.reject { |analysis| analysis == default_voice_attributes }
  return default_voice_attributes if usable.empty?

  {
    formality: aggregate_dimension(usable, :formality),
    energy: aggregate_dimension(usable, :energy),
    warmth: aggregate_dimension(usable, :warmth),
    authority: aggregate_dimension(usable, :authority),
    tone: aggregate_tone(usable),
    style: aggregate_style(usable),
    personality_traits: aggregate_personality_traits(usable),
    linguistic_patterns: aggregate_patterns(usable),
    emotional_tone: aggregate_emotional_tone(usable),
    consistency_score: calculate_voice_consistency(usable)
  }
end
-
-
# Combine one voice dimension across chunk analyses: majority level,
# mean score, pooled evidence (max 10), cross-chunk consistency, and
# the raw level distribution.
#
# Fix: with no data for the dimension the old math produced a NaN score
# and nil level; we now return an explicit empty aggregate instead.
def aggregate_dimension(analyses, dimension)
  dimensions = analyses.map { |a| a[dimension] }.compact

  if dimensions.empty?
    return { level: nil, score: 0.0, evidence: [], consistency: 0.0, distribution: {} }
  end

  # Count frequency of each level; most common wins
  level_counts = dimensions.group_by { |d| d[:level] }
                           .transform_values(&:count)
  primary_level = level_counts.max_by { |_, count| count }&.first

  # Average score across chunks
  avg_score = dimensions.sum { |d| d[:score] }.to_f / dimensions.size

  # Collect all evidence, de-duplicated
  all_evidence = dimensions.flat_map { |d| d[:evidence] || [] }.uniq.first(10)

  # Calculate consistency across chunks
  consistency = calculate_dimension_consistency(dimensions)

  {
    level: primary_level,
    score: avg_score.round(2),
    evidence: all_evidence,
    consistency: consistency,
    distribution: level_counts
  }
end
-
-
# Extract brand values chunk-by-chunk via the LLM, then merge and rank
# the findings by frequency and strength.
def extract_brand_values_with_context(content_chunks)
  total = content_chunks.size

  per_chunk = content_chunks.each_with_index.map do |chunk, index|
    prompt = build_brand_values_extraction_prompt(chunk, index, total)
    parse_brand_values_response(llm_service.analyze(prompt, json_response: true))
  end

  # Aggregate and rank by frequency and importance
  aggregate_brand_values(per_chunk)
end
-
-
# Build the LLM prompt for brand-value extraction from one content
# chunk. The JSON shape (explicit/implied/behavioral value lists with a
# strength field) matches what parse_brand_values_response and
# parse_value_list consume.
def build_brand_values_extraction_prompt(content, chunk_index, total_chunks)
  <<~PROMPT
    You are an expert brand strategist analyzing brand values. Examine this content (chunk #{chunk_index + 1} of #{total_chunks}) to identify core brand values.

    Content:
    #{content}

    Identify brand values using this comprehensive approach:

    1. EXPLICIT VALUES: Look for directly stated values, mission statements, or "what we believe" sections
    2. IMPLIED VALUES: Infer values from:
       - Repeated themes and concepts
       - The way products/services are described
       - How the brand talks about customers
       - What the brand emphasizes or prioritizes
       - Language choices and framing

    3. BEHAVIORAL VALUES: Values demonstrated through:
       - Actions described
       - Commitments made
       - Problems the brand chooses to solve
       - How the brand differentiates itself

    Return a JSON response with this structure:
    {
      "explicit_values": [
        {
          "value": "Innovation",
          "evidence": "Direct quote or reference",
          "context": "Where/how it was mentioned",
          "strength": 0.0-1.0
        }
      ],
      "implied_values": [
        {
          "value": "Customer-centricity",
          "evidence": "Patterns or themes observed",
          "reasoning": "Why this value is implied",
          "strength": 0.0-1.0
        }
      ],
      "behavioral_values": [
        {
          "value": "Sustainability",
          "evidence": "Actions or commitments described",
          "manifestation": "How it's demonstrated",
          "strength": 0.0-1.0
        }
      ],
      "value_hierarchy": [
        "Ordered list of values by importance based on emphasis"
      ],
      "conflicting_values": [
        {
          "value1": "Speed",
          "value2": "Perfection",
          "explanation": "How these might conflict"
        }
      ]
    }

    Focus on identifying 3-7 core values that truly define this brand. Be specific and cite evidence.
  PROMPT
end
-
-
# Parse an LLM brand-values response into normalized lists, degrading to
# the empty default structure when the response is blank or unusable.
def parse_brand_values_response(response)
  return default_brand_values_structure if response.blank?

  # NOTE(review): intentional best-effort fallback — non-JSON responses
  # are kept raw; the downstream lookups then yield empty lists.
  parsed = JSON.parse(response) rescue response

  {
    explicit_values: parse_value_list(parsed['explicit_values']),
    implied_values: parse_value_list(parsed['implied_values']),
    behavioral_values: parse_value_list(parsed['behavioral_values']),
    value_hierarchy: Array(parsed['value_hierarchy']).first(7),
    conflicting_values: Array(parsed['conflicting_values'])
  }
rescue => e
  Rails.logger.error "Brand values parsing error: #{e.message}"
  default_brand_values_structure
end
-
-
# Normalize an LLM-returned list of value descriptors into hashes of
# { value:, evidence:, context:, strength: }. Non-hash entries and
# non-array input are dropped.
#
# Fix: strength is now clamped into 0.0..1.0 — the previous
# `[x, 1.0].min` only capped the top, letting negative model output
# through as a negative strength.
def parse_value_list(values)
  return [] unless values.is_a?(Array)

  values.filter_map do |value_data|
    next unless value_data.is_a?(Hash)

    {
      value: value_data['value'],
      evidence: value_data['evidence'],
      # The three prompt sections name this field differently.
      context: value_data['context'] || value_data['reasoning'] || value_data['manifestation'],
      strength: value_data['strength'].to_f.clamp(0.0, 1.0)
    }
  end
end
-
-
# Merge per-chunk value extractions into a ranked top-7 list.
#
# Values are grouped case-insensitively by name across the explicit /
# implied / behavioral categories, then scored by average strength
# weighted by category (explicit 1.2 > behavioral 1.1 > implied 1.0)
# and by how many chunks mentioned them (log-scaled frequency).
#
# @param chunk_values [Array<Hash>] outputs of parse_brand_values_response
# @return [Array<Hash>] up to 7 { name:, score:, type:, frequency:,
#   evidence:, contexts: } hashes, highest score first
def aggregate_brand_values(chunk_values)
  all_values = {
    explicit: [],
    implied: [],
    behavioral: []
  }

  # Collect all values across chunks
  chunk_values.each do |chunk|
    all_values[:explicit].concat(chunk[:explicit_values] || [])
    all_values[:implied].concat(chunk[:implied_values] || [])
    all_values[:behavioral].concat(chunk[:behavioral_values] || [])
  end

  # Group by (downcased) value name and aggregate counts/strengths.
  # NOTE: a value seen under several types keeps the type it was first
  # seen under, but accumulates frequency/strength from all of them.
  aggregated_values = {}

  [:explicit, :implied, :behavioral].each do |type|
    all_values[type].group_by { |v| v[:value]&.downcase }
                    .each do |value_name, instances|
      next if value_name.blank?

      aggregated_values[value_name] ||= {
        value: instances.first[:value], # Original case
        type: type,
        frequency: 0,
        total_strength: 0,
        evidence: [],
        contexts: []
      }

      aggregated_values[value_name][:frequency] += instances.size
      aggregated_values[value_name][:total_strength] += instances.sum { |i| i[:strength] }
      aggregated_values[value_name][:evidence].concat(instances.map { |i| i[:evidence] }.compact)
      aggregated_values[value_name][:contexts].concat(instances.map { |i| i[:context] }.compact)
    end
  end

  # Calculate final scores and rank
  final_values = aggregated_values.values.map do |value_data|
    avg_strength = value_data[:total_strength] / value_data[:frequency]

    # Boost score for explicit values and frequency
    type_weight = case value_data[:type]
                  when :explicit then 1.2
                  when :behavioral then 1.1
                  else 1.0
                  end

    # Log-scaled share of chunks mentioning this value (0..~1)
    frequency_weight = Math.log(value_data[:frequency] + 1) / Math.log(chunk_values.size + 1)

    # 70% strength, 30% frequency, scaled by the type weight
    final_score = (avg_strength * type_weight * (0.7 + 0.3 * frequency_weight))

    {
      name: value_data[:value],
      score: final_score.round(3),
      type: value_data[:type],
      frequency: value_data[:frequency],
      evidence: value_data[:evidence].uniq.first(5),
      contexts: value_data[:contexts].uniq.first(3)
    }
  end

  # Sort by score and take top values
  final_values.sort_by { |v| -v[:score] }.first(7)
end
-
-
# Empty scaffold returned when value extraction fails or yields nothing;
# mirrors the shape produced by parse_brand_values_response.
def default_brand_values_structure
  %i[explicit_values implied_values behavioral_values value_hierarchy conflicting_values]
    .to_h { |key| [key, []] }
end
-
-
# Extract messaging pillars chunk-by-chunk via the LLM, then merge the
# per-chunk findings into a structured pillar set.
def extract_messaging_pillars_detailed(content_chunks)
  total = content_chunks.size

  per_chunk = content_chunks.each_with_index.map do |chunk, index|
    prompt = build_messaging_pillars_extraction_prompt(chunk, index, total)
    parse_messaging_pillars_response(llm_service.analyze(prompt, json_response: true))
  end

  # Aggregate and structure pillars
  aggregate_messaging_pillars(per_chunk)
end
-
-
# Builds the per-chunk LLM prompt for messaging-pillar extraction.
# chunk_index/total_chunks give the model positional context; the expected
# response schema is spelled out inside the prompt itself and is what
# parse_messaging_pillars_response consumes.
def build_messaging_pillars_extraction_prompt(content, chunk_index, total_chunks)
  <<~PROMPT
    You are an expert messaging strategist. Analyze this brand content (chunk #{chunk_index + 1} of #{total_chunks}) to identify key messaging pillars.

    Content:
    #{content}

    Identify messaging pillars - the core themes that support all brand communications. Look for:

    1. RECURRING THEMES: Topics or concepts that appear multiple times
    2. VALUE PROPOSITIONS: Key benefits or advantages emphasized
    3. DIFFERENTIATORS: What makes this brand unique
    4. AUDIENCE BENEFITS: How the brand helps its customers
    5. PROOF POINTS: Evidence, features, or capabilities that support claims

    Return a JSON response with this structure:
    {
      "pillars": [
        {
          "name": "Clear, descriptive pillar name",
          "description": "What this pillar represents",
          "key_messages": [
            "Specific messages under this pillar"
          ],
          "supporting_points": [
            "Facts, features, or benefits that support this pillar"
          ],
          "target_emotion": "What feeling this pillar aims to evoke",
          "evidence": [
            "Quotes or references from the content"
          ],
          "frequency": 1-10,
          "importance": 1-10
        }
      ],
      "pillar_relationships": [
        {
          "pillar1": "Name of first pillar",
          "pillar2": "Name of second pillar",
          "relationship": "How these pillars connect or support each other"
        }
      ],
      "missing_pillars": [
        {
          "suggested_pillar": "What might be missing",
          "rationale": "Why this could strengthen the messaging"
        }
      ]
    }

    Identify 3-5 main pillars that form the foundation of this brand's messaging.
  PROMPT
end
-
-
def parse_messaging_pillars_response(response)
  # Parses the LLM's pillar-extraction payload into a normalized hash of
  # :pillars / :relationships / :missing.
  # Fix: the old `JSON.parse(response) rescue response` left a raw String in
  # `parsed` when the JSON was malformed; String#[] then did substring
  # lookups ("...['pillars']") and produced garbage instead of failing over.
  # Non-hash parse results now fall back to the default structure.
  return default_pillars_structure if response.blank?

  parsed = response.is_a?(Hash) ? response : JSON.parse(response)
  return default_pillars_structure unless parsed.is_a?(Hash)

  {
    pillars: parse_pillars_list(parsed['pillars']),
    relationships: Array(parsed['pillar_relationships']),
    missing: Array(parsed['missing_pillars'])
  }
rescue => e
  Rails.logger.error "Messaging pillars parsing error: #{e.message}"
  default_pillars_structure
end
-
-
def parse_pillars_list(pillars)
  # Sanitizes the raw pillar list from the LLM: non-hash entries are dropped,
  # list fields are capped, and numeric scores are forced into range.
  # Fix: frequency/importance are clamped to 0..10 — the old `[x, 10].min`
  # only capped the upper bound, so negative values leaked through
  # (nil.to_i is 0, which stays valid).
  return [] unless pillars.is_a?(Array)

  cleaned = pillars.map do |pillar|
    next unless pillar.is_a?(Hash)

    {
      name: pillar['name'],
      description: pillar['description'],
      key_messages: Array(pillar['key_messages']).first(5),
      supporting_points: Array(pillar['supporting_points']).first(5),
      target_emotion: pillar['target_emotion'],
      evidence: Array(pillar['evidence']).first(3),
      frequency: pillar['frequency'].to_i.clamp(0, 10),
      importance: pillar['importance'].to_i.clamp(0, 10)
    }
  end

  cleaned.compact
end
-
-
# Merges per-chunk pillar extractions into at most five ranked pillars plus
# the cross-pillar relationships whose endpoints both survive the cut.
# NOTE(review): code left byte-for-byte (comments only) — the scoring
# formula and mutation order here are load-bearing.
def aggregate_messaging_pillars(chunk_pillars)
  all_pillars = {}
  all_relationships = []

  # Collect all pillars, merged case-insensitively by name
  chunk_pillars.each do |chunk|
    chunk[:pillars].each do |pillar|
      key = pillar[:name]&.downcase&.strip
      next if key.blank?

      all_pillars[key] ||= {
        name: pillar[:name], # first-seen casing wins
        description: [],
        key_messages: [],
        supporting_points: [],
        target_emotions: [],
        evidence: [],
        total_frequency: 0,
        total_importance: 0,
        occurrences: 0
      }

      all_pillars[key][:description] << pillar[:description]
      all_pillars[key][:key_messages].concat(pillar[:key_messages] || [])
      all_pillars[key][:supporting_points].concat(pillar[:supporting_points] || [])
      all_pillars[key][:target_emotions] << pillar[:target_emotion]
      all_pillars[key][:evidence].concat(pillar[:evidence] || [])
      all_pillars[key][:total_frequency] += pillar[:frequency]
      all_pillars[key][:total_importance] += pillar[:importance]
      all_pillars[key][:occurrences] += 1
    end

    all_relationships.concat(chunk[:relationships] || [])
  end

  # Process and rank pillars: blend average frequency (30%), average
  # importance (50%) and cross-chunk recurrence (20%, log-damped and
  # rescaled to the same 0-10 range via the * 10 factor).
  processed_pillars = all_pillars.map do |key, data|
    avg_frequency = data[:total_frequency].to_f / data[:occurrences]
    avg_importance = data[:total_importance].to_f / data[:occurrences]
    occurrence_weight = Math.log(data[:occurrences] + 1) / Math.log(chunk_pillars.size + 1)

    score = (avg_frequency * 0.3 + avg_importance * 0.5 + occurrence_weight * 10 * 0.2)

    {
      name: data[:name],
      description: most_representative(data[:description]),
      key_messages: deduplicate_and_rank(data[:key_messages], 5),
      supporting_points: deduplicate_and_rank(data[:supporting_points], 7),
      target_emotion: most_common(data[:target_emotions].compact),
      evidence: data[:evidence].uniq.first(5),
      strength_score: score.round(2),
      consistency_score: (data[:occurrences].to_f / chunk_pillars.size).round(2)
    }
  end

  # Sort by score and take top pillars
  top_pillars = processed_pillars.sort_by { |p| -p[:strength_score] }.first(5)

  # Keep only relationships whose two endpoints are both top pillars.
  # Relationship hashes are string-keyed — they come straight from JSON.
  pillar_names = top_pillars.map { |p| p[:name].downcase }
  relevant_relationships = all_relationships.select do |rel|
    pillar_names.include?(rel['pillar1']&.downcase) &&
    pillar_names.include?(rel['pillar2']&.downcase)
  end.uniq

  {
    pillars: top_pillars,
    relationships: relevant_relationships,
    pillar_hierarchy: create_pillar_hierarchy(top_pillars, relevant_relationships)
  }
end
-
-
def most_representative(descriptions)
  # Treats the longest non-nil description as the most complete one;
  # empty input yields "".
  candidates = descriptions.compact
  candidates.max_by(&:length) || ""
end
-
-
def deduplicate_and_rank(items, limit)
  # Case-insensitive dedupe ranked by recurrence; keeps the first-seen
  # casing of each group, at most +limit+ entries.
  grouped = items.group_by { |item| item.downcase.strip }
  grouped.sort_by { |_, occurrences| -occurrences.size }
         .first(limit)
         .map { |_, occurrences| occurrences.first }
end
-
-
def create_pillar_hierarchy(pillars, relationships)
  # Two-tier hierarchy: the top two pillars lead, the remainder support;
  # relationships are flattened into readable connection strings.
  names = pillars.map { |p| p[:name] }

  {
    primary: names.first(2),
    supporting: names.drop(2),
    connections: relationships.map { |r|
      "#{r['pillar1']} + #{r['pillar2']}: #{r['relationship']}"
    }
  }
end
-
-
def default_pillars_structure
  # Empty scaffold returned when pillar parsing fails.
  { pillars: [], relationships: [], missing: [] }
end
-
-
def extract_guidelines_comprehensive(content_chunks)
  # Extracts categorized guidelines from every chunk via the LLM, then
  # merges them into a single rule book.
  per_chunk = content_chunks.each_with_index.map do |chunk, position|
    raw = llm_service.analyze(
      build_comprehensive_guidelines_prompt(chunk, position, content_chunks.size),
      json_response: true
    )
    parse_guidelines_response(raw)
  end

  aggregate_guidelines(per_chunk)
end
-
-
# Builds the per-chunk LLM prompt for guideline extraction. The embedded
# JSON schema defines the five rule categories that
# parse_guidelines_response expects back.
def build_comprehensive_guidelines_prompt(content, chunk_index, total_chunks)
  <<~PROMPT
    You are an expert brand guidelines analyst. Extract all brand rules, guidelines, and requirements from this content (chunk #{chunk_index + 1} of #{total_chunks}).

    Content:
    #{content}

    Extract guidelines in these categories:

    1. VOICE & TONE RULES:
    - How to speak/write
    - Tone requirements
    - Voice characteristics to maintain
    - Language do's and don'ts

    2. MESSAGING RULES:
    - What to communicate
    - Key messages to include
    - Topics to avoid
    - Claims restrictions

    3. VISUAL RULES:
    - Color usage
    - Typography requirements
    - Logo usage
    - Image style

    4. GRAMMAR & STYLE:
    - Punctuation rules
    - Capitalization
    - Formatting requirements
    - Writing conventions

    5. BRAND BEHAVIOR:
    - How the brand should act
    - Customer interaction guidelines
    - Response patterns
    - Ethics and values in practice

    Return a JSON response with this structure:
    {
      "voice_tone_rules": {
        "must_do": ["Required voice/tone elements"],
        "should_do": ["Recommended practices"],
        "must_not_do": ["Prohibited voice/tone elements"],
        "examples": {
          "good": ["Examples of correct usage"],
          "bad": ["Examples to avoid"]
        }
      },
      "messaging_rules": {
        "required_elements": ["Must-include messages"],
        "key_phrases": ["Specific phrases to use"],
        "prohibited_topics": ["Topics/claims to avoid"],
        "competitor_mentions": "Guidelines for mentioning competitors"
      },
      "visual_rules": {
        "colors": {
          "primary": ["#hex codes"],
          "secondary": ["#hex codes"],
          "usage_rules": ["When/how to use colors"]
        },
        "typography": {
          "fonts": ["Font names and weights"],
          "sizes": ["Size specifications"],
          "usage_rules": ["When to use which fonts"]
        },
        "imagery": {
          "style": "Description of image style",
          "do": ["Image requirements"],
          "dont": ["Image restrictions"]
        }
      },
      "grammar_style_rules": {
        "punctuation": ["Specific punctuation rules"],
        "capitalization": ["What to capitalize"],
        "formatting": ["Format requirements"],
        "preferred_terms": {"use_this": "not_that"}
      },
      "behavioral_rules": {
        "customer_interaction": ["How to interact with customers"],
        "response_patterns": ["How to respond to situations"],
        "ethical_guidelines": ["Ethical considerations"]
      },
      "rule_priority": [
        {
          "rule": "Most important rule",
          "category": "Which category",
          "importance": 1-10,
          "consequences": "What happens if violated"
        }
      ]
    }

    Be specific and extract actual rules, not general observations.
  PROMPT
end
-
-
def parse_guidelines_response(response)
  # Parses the LLM's guideline-extraction payload into symbol-keyed sections.
  # Fix: same `rescue response` flaw as the pillars parser — malformed JSON
  # left a raw String in `parsed`, and String#[] substring lookups then
  # yielded nonsense; non-hash results now fall back to the default.
  return default_guidelines_structure if response.blank?

  parsed = response.is_a?(Hash) ? response : JSON.parse(response)
  return default_guidelines_structure unless parsed.is_a?(Hash)

  {
    voice_tone_rules: parse_rule_category(parsed['voice_tone_rules']),
    messaging_rules: parse_rule_category(parsed['messaging_rules']),
    visual_rules: parse_visual_rules(parsed['visual_rules']),
    grammar_style_rules: parse_rule_category(parsed['grammar_style_rules']),
    behavioral_rules: parse_rule_category(parsed['behavioral_rules']),
    rule_priority: parse_rule_priorities(parsed['rule_priority'])
  }
rescue => e
  Rails.logger.error "Guidelines parsing error: #{e.message}"
  default_guidelines_structure
end
-
-
def parse_rule_category(category_data)
  # Normalizes one rule-category hash from the LLM: arrays are capped at 10
  # entries, hashes and strings pass through, anything else becomes [].
  return {} unless category_data.is_a?(Hash)

  category_data.transform_values do |value|
    if value.is_a?(Array)
      value.first(10)
    elsif value.is_a?(Hash) || value.is_a?(String)
      value
    else
      []
    end
  end
end
-
-
def parse_visual_rules(visual_data)
  # Splits the LLM's visual-rules payload into its three sub-sections;
  # {} when the payload is not a hash.
  return {} unless visual_data.is_a?(Hash)

  {
    colors: parse_color_rules(visual_data['colors']),
    typography: parse_typography_rules(visual_data['typography']),
    imagery: parse_imagery_rules(visual_data['imagery'])
  }
end
-
-
def parse_color_rules(color_data)
  # Validates color entries from the LLM: only 6-digit hex codes survive.
  # Fix: anchors changed from ^/$ (per-line) to \A/\z (whole-string) so a
  # multi-line value such as "#AABBCC\njunk" can no longer slip through;
  # match? avoids allocating MatchData.
  return {} unless color_data.is_a?(Hash)

  hex_color = /\A#[0-9A-Fa-f]{6}\z/

  {
    primary: Array(color_data['primary']).select { |c| hex_color.match?(c) },
    secondary: Array(color_data['secondary']).select { |c| hex_color.match?(c) },
    usage_rules: Array(color_data['usage_rules'])
  }
end
-
-
def parse_typography_rules(typography_data)
  # Coerces typography fields into arrays; {} when the payload is malformed.
  return {} unless typography_data.is_a?(Hash)

  %w[fonts sizes usage_rules].each_with_object({}) do |field, rules|
    rules[field.to_sym] = Array(typography_data[field])
  end
end
-
-
def parse_imagery_rules(imagery_data)
  # Normalizes imagery guidance ('' style default, array do/dont lists);
  # {} when the payload is malformed.
  return {} unless imagery_data.is_a?(Hash)

  {
    style: imagery_data['style'] || '',
    do: Array(imagery_data['do']),
    dont: Array(imagery_data['dont'])
  }
end
-
-
def parse_rule_priorities(priorities)
  # Sanitizes the prioritized-rule list from the LLM (top 10 kept, non-hash
  # entries dropped).
  # Fix: importance is clamped to 0..10 — the old `[x, 10].min` only capped
  # the upper bound, letting negative values through (nil.to_i is 0).
  return [] unless priorities.is_a?(Array)

  cleaned = priorities.map do |priority|
    next unless priority.is_a?(Hash)

    {
      rule: priority['rule'],
      category: priority['category'],
      importance: priority['importance'].to_i.clamp(0, 10),
      consequences: priority['consequences']
    }
  end

  cleaned.compact.first(10)
end
-
-
def aggregate_guidelines(chunk_guidelines)
  # Combines per-chunk guideline extractions into one rule book, scores how
  # consistent the chunks were, and flags contradictory rules.
  merged = {
    voice_tone_rules: aggregate_rule_category(chunk_guidelines, :voice_tone_rules),
    messaging_rules: aggregate_rule_category(chunk_guidelines, :messaging_rules),
    visual_rules: aggregate_visual_rules(chunk_guidelines),
    grammar_style_rules: aggregate_rule_category(chunk_guidelines, :grammar_style_rules),
    behavioral_rules: aggregate_rule_category(chunk_guidelines, :behavioral_rules),
    rule_priorities: aggregate_priorities(chunk_guidelines),
    rule_consistency: calculate_rule_consistency(chunk_guidelines)
  }

  # Conflicts are computed over the merged view, not per chunk.
  merged[:conflicts] = detect_rule_conflicts(merged)
  merged
end
-
-
def aggregate_rule_category(guidelines, category)
  # Pools one rule category (e.g. :voice_tone_rules) across chunks and then
  # dedupes. Chunk payloads keep the string keys produced by JSON parsing.
  must = []
  should = []
  must_not = []
  good_examples = []
  bad_examples = []

  guidelines.each do |chunk|
    data = chunk[category] || {}

    must.concat(Array(data['must_do']))
    should.concat(Array(data['should_do']))
    must_not.concat(Array(data['must_not_do']))

    examples = data['examples']
    if examples.is_a?(Hash)
      good_examples.concat(Array(examples['good']))
      bad_examples.concat(Array(examples['bad']))
    end
  end

  {
    must_do: deduplicate_rules(must),
    should_do: deduplicate_rules(should),
    must_not_do: deduplicate_rules(must_not),
    examples: {
      good: good_examples.uniq.first(5),
      bad: bad_examples.uniq.first(5)
    }
  }
end
-
-
def deduplicate_rules(rules)
  # Collapses near-duplicate rules: rules sharing their first three words
  # (case-insensitive) count as one, keeping the longest phrasing. Cap: 15.
  buckets = rules.group_by { |rule| rule.downcase.split.first(3).join(' ') }
  buckets.values
         .map { |variants| variants.max_by(&:length) }
         .uniq
         .first(15)
end
-
-
def aggregate_visual_rules(guidelines)
  # Merges visual rules across chunks. These sub-hashes are symbol-keyed —
  # they come from parse_visual_rules, not raw JSON.
  primary = []
  secondary = []
  fonts = []
  imagery_styles = []
  imagery_do = []
  imagery_dont = []

  guidelines.each do |chunk|
    visual = chunk[:visual_rules] || {}

    if visual[:colors]
      primary.concat(visual[:colors][:primary] || [])
      secondary.concat(visual[:colors][:secondary] || [])
    end

    fonts.concat(visual[:typography][:fonts] || []) if visual[:typography]

    if visual[:imagery]
      imagery_styles << visual[:imagery][:style] if visual[:imagery][:style].present?
      imagery_do.concat(visual[:imagery][:do] || [])
      imagery_dont.concat(visual[:imagery][:dont] || [])
    end
  end

  {
    colors: {
      primary: primary.uniq,
      secondary: secondary.uniq
    },
    typography: {
      fonts: fonts.uniq
    },
    imagery: {
      style: imagery_styles.join('; '),
      do: imagery_do.uniq.first(10),
      dont: imagery_dont.uniq.first(10)
    }
  }
end
-
-
def aggregate_priorities(guidelines)
  # Merges prioritized rules across chunks: identical rules (case-insensitive)
  # are combined, importance is averaged, and the top 20 are returned ordered
  # by importance, then recurrence.
  # Fix: chunks parsed by parse_guidelines_response store this list under
  # :rule_priority, but this method read :rule_priorities — always nil, so
  # every priority was silently dropped. Both keys are now accepted.
  all_priorities = guidelines.flat_map { |g| g[:rule_priority] || g[:rule_priorities] || [] }

  # Group by rule and average importance
  grouped = all_priorities.group_by { |p| p[:rule]&.downcase }

  priorities = grouped.map do |_rule, instances|
    avg_importance = instances.map { |i| i[:importance] }.sum.to_f / instances.size

    {
      rule: instances.first[:rule],
      category: most_common(instances.map { |i| i[:category] }),
      importance: avg_importance.round,
      consequences: instances.first[:consequences],
      frequency: instances.size
    }
  end

  priorities.sort_by { |p| [-p[:importance], -p[:frequency]] }.first(20)
end
-
-
def calculate_rule_consistency(guidelines)
  # Measures overlap of "must do" rules across chunks: 1.0 when every chunk
  # agrees, 0.5 when no rules exist at all, 1.0 trivially for <= 1 chunk.
  # Fix: chunk category hashes keep the string keys produced by JSON parsing
  # ('must_do'), but this method indexed with the :must_do symbol — always
  # nil, so the score was a constant 0.5. Both key styles are now read.
  return 1.0 if guidelines.size <= 1

  rule_categories = [:voice_tone_rules, :messaging_rules, :grammar_style_rules]
  consistency_scores = []

  rule_categories.each do |category|
    per_chunk_rules = guidelines.map do |g|
      category_data = g[category] || {}
      rules = category_data[:must_do] || category_data['must_do'] || []
      rules.map(&:downcase)
    end

    next unless per_chunk_rules.flatten.any?

    # Overlap between chunks: rules present in EVERY chunk vs all distinct rules
    shared = per_chunk_rules.reduce(:&) || []
    total_unique = per_chunk_rules.flatten.uniq.size

    consistency_scores << shared.size.to_f / total_unique
  end

  consistency_scores.empty? ? 0.5 : (consistency_scores.sum / consistency_scores.size).round(2)
end
-
-
def detect_rule_conflicts(aggregated)
  # Finds must_do / must_not_do pairs that appear to contradict each other
  # within the same category.
  [:voice_tone_rules, :messaging_rules, :behavioral_rules].each_with_object([]) do |category, conflicts|
    required = aggregated[category][:must_do] || []
    forbidden = aggregated[category][:must_not_do] || []

    required.product(forbidden).each do |do_rule, dont_rule|
      next unless rules_conflict?(do_rule, dont_rule)

      conflicts << {
        category: category,
        rule1: do_rule,
        rule2: dont_rule,
        type: 'direct_contradiction'
      }
    end
  end
end
-
-
def rules_conflict?(rule1, rule2)
  # Heuristic: a do-rule and a don't-rule sharing more than two words are
  # flagged as potentially contradictory. Crude but cheap; refine as needed.
  shared_words = rule1.downcase.split(/\W+/) & rule2.downcase.split(/\W+/)
  shared_words.size > 2
end
-
-
def default_guidelines_structure
  # Empty rule book returned when guideline parsing fails.
  {
    voice_tone_rules: {},
    messaging_rules: {},
    visual_rules: {},
    grammar_style_rules: {},
    behavioral_rules: {},
    rule_priority: []
  }
end
-
-
def analyze_visual_brand_elements
  # Builds the visual portion of the brand profile from uploaded assets;
  # {} when there is nothing to analyze. Explicit style guides, when
  # present, overlay the heuristic analysis.
  return {} if @visual_assets.empty?

  analysis = {
    colors: extract_colors_from_assets,
    typography: extract_typography_from_assets,
    imagery: analyze_imagery_style,
    logo_usage: analyze_logo_usage,
    visual_consistency: calculate_visual_consistency
  }

  guides = @visual_assets.where(asset_type: 'style_guide')
  enhance_visual_analysis_with_guides(analysis, guides) if guides.any?

  analysis
end
-
-
def extract_colors_from_assets
  # Derives a brand palette from logo/image asset metadata.
  # NOTE(review): assumes metadata['dominant_colors'] is ordered by
  # prominence — first two are treated as primary candidates.
  primary = []
  secondary = []

  @visual_assets.where(asset_type: ['logo', 'image']).each do |asset|
    dominant = asset.metadata['dominant_colors']
    next unless dominant.present?

    primary.concat(dominant.first(2))
    secondary.concat(dominant[2..4] || [])
  end

  collected = { primary: primary, secondary: secondary, accent: [], neutral: [] }

  {
    primary: cluster_similar_colors(primary).first(3),
    secondary: cluster_similar_colors(secondary).first(4),
    accent: detect_accent_colors(collected),
    neutral: detect_neutral_colors(collected),
    color_relationships: analyze_color_relationships(collected)
  }
end
-
-
def cluster_similar_colors(colors)
  # Placeholder clustering: dedupe, then order case-insensitively.
  # NOTE(review): production should use a real color-distance metric.
  colors.uniq.sort_by { |value| value.downcase }
end
-
-
def detect_accent_colors(_colors)
  # Stub — accent detection (high-saturation, sparingly used colors) is not
  # implemented yet; palette data is ignored for now.
  []
end
-
-
def detect_neutral_colors(_colors)
  # Static neutral ramp (white through black); asset data is not yet consulted.
  %w[#FFFFFF #F5F5F5 #E5E5E5 #333333 #000000]
end
-
-
def analyze_color_relationships(_colors)
  # Canned usage guidance; not yet derived from the actual palette.
  {
    primary_usage: "Headers, CTAs, brand elements",
    secondary_usage: "Supporting elements, backgrounds",
    contrast_ratios: "Ensures accessibility"
  }
end
-
-
def extract_typography_from_assets
  # Collects declared font names from asset metadata and returns them with a
  # default sizing scale for headings and body copy.
  fonts = []
  @visual_assets.each do |asset|
    declared = asset.metadata['fonts']
    fonts.concat(Array(declared)) if declared.present?
  end

  {
    primary_font: fonts.first || "System Default",
    secondary_font: fonts.second,
    heading_hierarchy: {
      h1: { size: "48px", weight: "bold" },
      h2: { size: "36px", weight: "semibold" },
      h3: { size: "24px", weight: "semibold" },
      h4: { size: "20px", weight: "medium" }
    },
    body_text: {
      size: "16px",
      line_height: "1.5",
      weight: "regular"
    }
  }
end
-
-
def analyze_imagery_style
  # Summarizes photographic/illustration style across image assets;
  # {} when no images have been uploaded.
  images = @visual_assets.where(asset_type: 'image')
  return {} if images.empty?

  {
    style_characteristics: determine_image_style(images),
    common_subjects: extract_image_subjects(images),
    color_treatment: analyze_image_color_treatment(images),
    composition_patterns: analyze_composition(images)
  }
end
-
-
def determine_image_style(assets)
  # Votes on the dominant declared style across assets; "modern" when none
  # is declared. Characteristics are currently canned.
  declared = assets.map { |asset| asset.metadata['style'] }.select(&:present?)

  {
    primary_style: most_common(declared) || "modern",
    characteristics: ["clean", "professional", "vibrant"]
  }
end
-
-
def analyze_logo_usage
  # Logo variation inventory plus standard usage rules; {} when no logos
  # have been uploaded.
  logos = @visual_assets.where(asset_type: 'logo')
  return {} if logos.none?

  {
    variations: logos.pluck(:metadata).map { |m| m['variation'] }.compact.uniq,
    clear_space: "Minimum clear space equal to 'x' height",
    minimum_size: "No smaller than 24px height for digital",
    backgrounds: {
      preferred: "White or light backgrounds",
      acceptable: "Brand colors with sufficient contrast",
      prohibited: "Busy patterns or low contrast"
    }
  }
end
-
-
def calculate_visual_consistency
  # Averages whichever consistency signals are available (colors, styles);
  # 0.7 neutral default when no asset metadata carries either.
  signals = []

  if @visual_assets.any? { |a| a.metadata['dominant_colors'].present? }
    palettes = @visual_assets.map { |a| a.metadata['dominant_colors'] }.compact
    signals << calculate_color_consistency(palettes)
  end

  if @visual_assets.any? { |a| a.metadata['style'].present? }
    declared_styles = @visual_assets.map { |a| a.metadata['style'] }.compact
    signals << calculate_style_consistency(declared_styles)
  end

  signals.empty? ? 0.7 : (signals.sum / signals.size).round(2)
end
-
-
def calculate_color_consistency(_color_sets)
  # Stubbed score — TODO: replace with a real color-distance comparison
  # across the assets' palettes.
  0.8
end
-
-
def calculate_style_consistency(styles)
  # Fraction-based consistency: 1.0 when every asset declares the same
  # style, approaching 0 as styles diverge.
  # Fix: an empty list previously produced -Infinity via a division by zero;
  # "no declared styles" is now treated as perfectly consistent (1.0).
  return 1.0 if styles.empty?

  unique_count = styles.uniq.size
  1.0 - (unique_count - 1).to_f / styles.size
end
-
-
def enhance_visual_analysis_with_guides(analysis, guides)
  # Overlays explicit style-guide rules (LLM-extracted from guide text) onto
  # the heuristically analyzed visual data; guide rules win on key conflicts.
  guides.each do |guide|
    text = guide.extracted_text
    next unless text.present?

    rules = extract_visual_rules_from_text(text)
    [:colors, :typography, :imagery].each do |section|
      analysis[section].merge!(rules[section]) if rules[section]
    end
  end

  analysis
end
-
-
def extract_visual_rules_from_text(text)
  # Asks the LLM to pull explicit visual rules out of style-guide prose,
  # then parses its structured reply.
  reply = llm_service.analyze(build_visual_extraction_prompt(text), json_response: true)
  parse_visual_rules_response(reply)
end
-
-
# Builds the LLM prompt for pulling explicit visual rules out of style-guide
# text. Input is truncated to the first ~3000 characters to bound prompt size.
def build_visual_extraction_prompt(text)
  <<~PROMPT
    Extract specific visual brand guidelines from this style guide text:

    #{text[0..3000]}

    Extract:
    1. Color codes (hex, RGB, CMYK)
    2. Font names and specifications
    3. Logo usage rules
    4. Image style requirements
    5. Spacing and layout rules

    Return as structured JSON.
  PROMPT
end
-
-
def parse_visual_rules_response(_response)
  # Stub — structured visual-rule parsing is not implemented yet, so guide
  # enhancement currently contributes nothing.
  {}
end
-
-
def default_voice_attributes
  # Neutral fallback profile used when voice analysis yields nothing.
  dimensions = [:formality, :energy, :warmth, :authority]
  profile = dimensions.each_with_object({}) { |dim, acc| acc[dim] = default_dimension(dim) }

  profile.merge(
    tone: default_tone,
    style: default_style,
    personality_traits: [],
    linguistic_patterns: {},
    emotional_tone: {}
  )
end
-
-
def default_dimension(name)
  # Midpoint placeholder for one voice dimension: the middle entry of the
  # dimension's level scale with a neutral 0.5 score.
  middle_level = VOICE_DIMENSIONS[name][2]

  {
    level: middle_level,
    score: 0.5,
    evidence: [],
    consistency: 0.5
  }
end
-
-
def default_tone
  # Fallback tone profile: professional primary, nothing else known.
  { primary: 'professional', secondary: [], avoided: [], consistency: 0.5 }
end
-
-
def default_style
  # Fallback writing-style profile used when no chunk produced style data.
  {
    writing: 'informative',
    sentence_structure: 'varied',
    vocabulary: 'intermediate',
    paragraph_length: 'medium',
    active_passive_ratio: 0.7
  }
end
-
-
# Scores how stable one voice dimension's level is across chunk analyses:
# 1.0 when every chunk agrees, scaling down as more distinct levels appear.
# NOTE(review): the denominator uses the scale length of the FIRST entry in
# VOICE_DIMENSIONS — this assumes every dimension shares the same number of
# levels; confirm against the constant's definition.
def calculate_dimension_consistency(dimensions)
  return 1.0 if dimensions.size <= 1

  # Check how consistent the level is across chunks
  levels = dimensions.map { |d| d[:level] }
  unique_levels = levels.uniq

  # Perfect consistency = 1 unique level
  # Worst consistency = all different levels
  consistency = 1.0 - (unique_levels.size - 1).to_f / (VOICE_DIMENSIONS.values.first.size - 1)
  consistency.round(2)
end
-
-
def calculate_voice_consistency(analyses)
  # Averages the per-dimension consistency scores (0.5 when missing).
  # NOTE(review): only the first entry's dimensions are consulted — this
  # appears to assume the caller passes already-aggregated dimension data;
  # confirm against the call site.
  scores = [:formality, :energy, :warmth, :authority].map do |dimension|
    analyses.first[dimension][:consistency] || 0.5
  end

  (scores.sum / scores.size).round(2)
end
-
-
def aggregate_tone(analyses)
  # Merges per-chunk tone findings into one profile: majority primary tone,
  # top-3 secondary tones, and avoided tones that recur in 2+ chunks.
  primaries = analyses.map { |chunk| chunk[:tone][:primary] }
  secondaries = analyses.flat_map { |chunk| chunk[:tone][:secondary] || [] }
  avoided = analyses.flat_map { |chunk| chunk[:tone][:avoided] || [] }

  primary_freq = primaries.group_by { |t| t }.transform_values(&:size)
  secondary_freq = secondaries.group_by { |t| t }.transform_values(&:size)

  dominant = primary_freq.max_by { |_, n| n }
  repeated_avoided = avoided.group_by { |t| t }
                            .select { |_, hits| hits.size > 1 }
                            .keys

  {
    primary: (dominant && dominant.first) || 'professional',
    secondary: secondary_freq.sort_by { |_, n| -n }.first(3).map(&:first),
    avoided: repeated_avoided,
    consistency: calculate_tone_consistency(analyses),
    distribution: primary_freq
  }
end
-
-
def calculate_tone_consistency(analyses)
  # 1.0 when every chunk reports the same primary tone; decreases as more
  # distinct primaries appear relative to chunk count.
  distinct = analyses.map { |chunk| chunk[:tone][:primary] }.uniq.size
  1.0 - (distinct - 1).to_f / analyses.size
end
-
-
def aggregate_style(analyses)
  # Merges per-chunk style readings by majority vote; the active/passive
  # ratio is averaged.
  # Fix: an empty style list previously raised ZeroDivisionError
  # (integer 0 / 0 in the ratio average); fall back to the neutral default
  # profile instead.
  styles = analyses.map { |a| a[:style] }.compact
  return default_style if styles.empty?

  {
    writing: most_common(styles.map { |s| s[:writing] }),
    sentence_structure: most_common(styles.map { |s| s[:sentence_structure] }),
    vocabulary: most_common(styles.map { |s| s[:vocabulary] }),
    paragraph_length: most_common(styles.map { |s| s[:paragraph_length] }),
    active_passive_ratio: (styles.map { |s| s[:active_passive_ratio] }.sum / styles.size).round(2)
  }
end
-
-
def aggregate_personality_traits(analyses)
  # Ranks personality traits by case-insensitive recurrence across chunks
  # (top 7); strength is occurrences per chunk.
  pooled = analyses.flat_map { |analysis| analysis[:personality_traits] || [] }
  by_name = pooled.group_by(&:downcase)

  ranked = by_name.sort_by { |_, hits| -hits.size }.first(7)
  ranked.map do |name, hits|
    {
      trait: pooled.find { |candidate| candidate.downcase == name },
      frequency: hits.size,
      strength: hits.size.to_f / analyses.size
    }
  end
end
-
-
def aggregate_patterns(analyses)
  # Pools linguistic patterns from every chunk, then keeps only phrases seen
  # more than once, ranked by frequency (top 10 per bucket, first-seen casing).
  buckets = {
    common_phrases: [],
    power_words: [],
    transitions: [],
    openings: [],
    closings: []
  }

  analyses.each do |analysis|
    source = analysis[:linguistic_patterns]
    next unless source.is_a?(Hash)

    source.each do |bucket_name, entries|
      key = bucket_name.to_sym
      (buckets[key] ||= []).concat(Array(entries))
    end
  end

  buckets.transform_values do |entries|
    entries.group_by(&:downcase)
           .select { |_, hits| hits.size > 1 }
           .sort_by { |_, hits| -hits.size }
           .first(10)
           .map { |_, hits| hits.first }
  end
end
-
-
def aggregate_emotional_tone(analyses)
  # Condenses per-chunk emotional readings into one profile; {} when no
  # chunk produced emotional data.
  readings = analyses.map { |a| a[:emotional_tone] }.compact
  return {} if readings.empty?

  ratios = readings.map { |r| r[:positivity_ratio] || 0.5 }

  {
    primary_emotion: most_common(readings.map { |r| r[:primary_emotion] }),
    emotional_range: most_common(readings.map { |r| r[:emotional_range] }),
    positivity_ratio: (ratios.sum / readings.size).round(2)
  }
end
-
-
def most_common(array)
  # Most frequent element of +array+ (ties broken arbitrarily); nil when empty.
  return nil if array.empty?

  frequencies = array.group_by { |item| item }
  winner, _occurrences = frequencies.max_by { |_, occurrences| occurrences.length }
  winner
end
-
-
def validate_patterns(patterns_data)
  # Whitelists and truncates the linguistic-pattern buckets from an LLM
  # payload (string-keyed); {} when the payload is malformed.
  return {} unless patterns_data.is_a?(Hash)

  limits = {
    common_phrases: 10,
    power_words: 10,
    transitions: 5,
    openings: 5,
    closings: 5
  }

  limits.each_with_object({}) do |(bucket, cap), validated|
    validated[bucket] = Array(patterns_data[bucket.to_s]).first(cap)
  end
end
-
-
def validate_emotional_tone(emotional_data)
  # Sanitizes the emotional-tone hash from an LLM payload, applying neutral
  # defaults for missing fields.
  # Fix: positivity_ratio is now clamped to the valid 0.0..1.0 range — the
  # old `[x, 1.0].min` only capped the upper bound, so negative values
  # passed through (nil.to_f silently becomes 0.0).
  return {} unless emotional_data.is_a?(Hash)

  {
    primary_emotion: emotional_data['primary_emotion'] || 'neutral',
    emotional_range: emotional_data['emotional_range'] || 'moderate',
    positivity_ratio: emotional_data['positivity_ratio'].to_f.clamp(0.0, 1.0)
  }
end
-
-
# Cross-references the four analysis outputs for mutual consistency and
# attaches a :validation_results section; when overall coherence drops below
# 0.7 the findings themselves are adjusted by reconcile_inconsistencies.
# Returns the (possibly adjusted) combined findings hash.
def cross_validate_findings(voice_attrs, brand_vals, messaging_pillars, guidelines)
  validated = {
    voice_attributes: voice_attrs,
    brand_values: brand_vals,
    messaging_pillars: messaging_pillars,
    guidelines: guidelines
  }

  # Validate voice attributes against guidelines
  voice_guideline_alignment = validate_voice_against_guidelines(voice_attrs, guidelines)

  # Validate brand values against messaging pillars
  value_pillar_alignment = validate_values_against_pillars(brand_vals, messaging_pillars)

  # Validate tone consistency across all elements
  tone_consistency = validate_tone_consistency(voice_attrs, guidelines, messaging_pillars)

  # Add validation metadata
  validated[:validation_results] = {
    voice_guideline_alignment: voice_guideline_alignment,
    value_pillar_alignment: value_pillar_alignment,
    tone_consistency: tone_consistency,
    overall_coherence: calculate_overall_coherence(voice_guideline_alignment, value_pillar_alignment, tone_consistency)
  }

  # Adjust findings based on validation
  if validated[:validation_results][:overall_coherence] < 0.7
    validated = reconcile_inconsistencies(validated)
  end

  validated
end
-
-
def validate_voice_against_guidelines(voice_attrs, guidelines)
  # Scores how well analyzed voice attributes agree with extracted guideline
  # rules; each detected conflict lowers the score and is recorded.
  score = 1.0
  issues = []

  # Formality check: guidelines demanding a formal/professional register
  # clash with a very casual analyzed voice.
  required = guidelines[:voice_tone_rules][:must_do]
  if required
    wants_formality = required.any? { |rule|
      text = rule.downcase
      text.include?('formal') || text.include?('professional')
    }

    if wants_formality && voice_attrs[:formality][:level] == 'very_casual'
      score -= 0.3
      issues << "Voice formality conflicts with guidelines"
    end
  end

  # Tone check: any tone in use that a must_not_do rule mentions is a conflict.
  banned = guidelines[:voice_tone_rules][:must_not_do] || []
  in_use = [voice_attrs[:tone][:primary]] + (voice_attrs[:tone][:secondary] || [])

  clashing = in_use.select { |tone|
    banned.any? { |rule| rule.downcase.include?(tone.downcase) }
  }

  unless clashing.empty?
    score -= 0.2 * clashing.size
    issues << "Conflicting tones: #{clashing.join(', ')}"
  end

  {
    score: [score, 0].max,
    misalignments: issues,
    recommendation: score < 0.7 ? "Review and reconcile voice guidelines" : "Good alignment"
  }
end
-
-
def validate_values_against_pillars(brand_values, messaging_pillars)
  # Checks that each extracted brand value surfaces somewhere in the pillar
  # names, descriptions, or key messages (case-insensitive substring match,
  # also trying hyphens-as-spaces).
  # Fix: an empty value list previously produced a NaN score (0.0 / 0); it
  # is now treated as trivially aligned.
  values = brand_values.map { |v| v[:name].downcase }
  if values.empty?
    return {
      score: 1.0,
      reflected: [],
      missing: [],
      recommendation: "Values well represented"
    }
  end

  pillar_content = messaging_pillars[:pillars].flat_map { |p|
    [p[:name], p[:description]] + p[:key_messages]
  }.join(' ').downcase

  reflected_values = values.select { |value|
    pillar_content.include?(value) ||
      pillar_content.include?(value.gsub('-', ' '))
  }

  alignment_score = reflected_values.size.to_f / values.size

  {
    score: alignment_score,
    reflected: reflected_values,
    missing: values - reflected_values,
    recommendation: alignment_score < 0.6 ? "Strengthen value representation in messaging" : "Values well represented"
  }
end
-
-
def validate_tone_consistency(voice_attrs, guidelines, messaging_pillars)
  # Measures whether one tone dominates across the voice analysis, the
  # guideline text, and the pillars' target emotions.
  observed = [voice_attrs[:tone][:primary]]
  observed.concat(voice_attrs[:tone][:secondary] || [])

  # Tones merely mentioned in guideline prose count as implied usage.
  guideline_blob = guidelines.values.flatten.join(' ').downcase
  TONE_ATTRIBUTES.each do |candidate|
    observed << candidate if guideline_blob.include?(candidate.downcase)
  end

  observed.concat(messaging_pillars[:pillars].map { |p| p[:target_emotion] }.compact)

  grouped = observed.group_by(&:downcase)
  dominance = grouped.values.map(&:size).max.to_f / observed.size

  {
    score: dominance,
    dominant_tones: grouped.sort_by { |_, hits| -hits.size }.first(3).map(&:first),
    variation: 1.0 - dominance,
    recommendation: dominance < 0.5 ? "Establish clearer tone direction" : "Consistent tone usage"
  }
end
-
-
# Weighted blend of the three validation scores (voice 35%, values 35%,
# tone 30%), rounded to two decimal places.
def calculate_overall_coherence(voice_alignment, value_alignment, tone_consistency)
  weighted_sum = voice_alignment[:score] * 0.35 +
                 value_alignment[:score] * 0.35 +
                 tone_consistency[:score] * 0.30

  weighted_sum.round(2)
end
-
-
# Post-validation pass that resolves conflicts between the independently
# extracted findings, mutating and returning the +validated+ hash.
#
#   coherence < 0.5   -> flag for manual review and attach a textual report
#   0.5 <= c < 0.7    -> auto-reconcile: drop secondary tones that a
#                        must_not_do guideline explicitly names
#   coherence >= 0.7  -> leave findings untouched
def reconcile_inconsistencies(validated)
  # Adjust findings to resolve major inconsistencies
  coherence = validated[:validation_results][:overall_coherence]

  if coherence < 0.5
    # Major inconsistencies - flag for manual review
    validated[:requires_manual_review] = true
    validated[:inconsistency_notes] = generate_inconsistency_report(validated[:validation_results])
  elsif coherence < 0.7
    # Minor inconsistencies - attempt automatic reconciliation

    # Adjust secondary tones that conflict
    if validated[:validation_results][:voice_guideline_alignment][:misalignments].any?
      # A tone "conflicts" when any must_not_do rule mentions it by name
      conflicting_tones = validated[:voice_attributes][:tone][:secondary].select { |tone|
        validated[:guidelines][:voice_tone_rules][:must_not_do]&.any? { |rule|
          rule.downcase.include?(tone.downcase)
        }
      }

      # Move conflicting tones from :secondary to :avoided (overwrites any
      # previous :avoided list)
      validated[:voice_attributes][:tone][:secondary] -= conflicting_tones
      validated[:voice_attributes][:tone][:avoided] = conflicting_tones
    end
  end

  validated
end
-
-
# Builds human-readable notes for each validation aspect that fell below its
# acceptance threshold. Returns an array of message strings (empty when all
# aspects pass).
def generate_inconsistency_report(validation_results)
  notes = []

  voice = validation_results[:voice_guideline_alignment]
  if voice[:score] < 0.7
    notes << "Voice attributes conflict with stated guidelines: #{voice[:misalignments].join('; ')}"
  end

  value_alignment = validation_results[:value_pillar_alignment]
  if value_alignment[:score] < 0.6
    notes << "Brand values not well reflected in messaging: Missing #{value_alignment[:missing].join(', ')}"
  end

  notes << "Inconsistent tone usage across brand materials" if validation_results[:tone_consistency][:score] < 0.5

  notes
end
-
-
# Returns the ten most frequently tagged image subjects across the given
# assets, most common first. Reads the 'subjects' entry of each asset's
# metadata (scalar or array; normalized via Array()).
def extract_image_subjects(assets)
  counts = Hash.new(0)

  assets.each do |asset|
    tagged = asset.metadata['subjects']
    next unless tagged.present?

    Array(tagged).each { |subject| counts[subject] += 1 }
  end

  counts.sort_by { |_, count| -count }
        .first(10)
        .map { |subject, _| subject }
end
-
-
# Summarizes how color is treated across the image assets: the most common
# declared treatment (via the most_common helper, defaulting to "natural")
# plus the distinct variations seen.
def analyze_image_color_treatment(assets)
  treatments = assets.map { |asset| asset.metadata['color_treatment'] }.select(&:present?)

  {
    dominant_treatment: most_common(treatments) || "natural",
    variations: treatments.uniq
  }
end
-
-
# Surfaces the five most common composition patterns declared in asset
# metadata, together with a static composition guideline string.
def analyze_composition(assets)
  compositions = assets.map { |asset| asset.metadata['composition'] }.select(&:present?)

  tallies = Hash.new(0)
  compositions.each { |composition| tallies[composition] += 1 }

  {
    common_patterns: tallies.sort_by { |_, count| -count }
                            .first(5)
                            .map(&:first),
    guidelines: "Follow rule of thirds, maintain visual hierarchy"
  }
end
-
-
# Aggregates per-aspect confidence scores into one weighted overall score.
#
# Fix: the declared weights sum to 1.10 when :visual_confidence is present
# (0.15+0.20+0.15+0.15+0.15+0.10+0.20), so a strong analysis could score
# above 1.0 — which determine_confidence_level then failed to classify.
# The weighted total is now normalized by the sum of the weights actually
# in play, keeping the result in 0.0..1.0 for any subset of aspects.
#
# @param validated_data [Hash] cross-validated analysis findings
# @return [Hash] :overall, :breakdown, :confidence_level, :recommendations
def calculate_comprehensive_confidence_score(validated_data)
  scores = {}

  # Content volume score
  scores[:content_volume] = calculate_content_volume_score

  # Voice consistency score (0.5 fallback when the analyzer produced none)
  scores[:voice_consistency] = validated_data[:voice_attributes][:consistency_score] || 0.5

  # Value extraction confidence
  scores[:value_confidence] = calculate_value_extraction_confidence(validated_data[:brand_values])

  # Messaging clarity score
  scores[:messaging_clarity] = calculate_messaging_clarity(validated_data[:messaging_pillars])

  # Guidelines completeness
  scores[:guidelines_completeness] = calculate_guidelines_completeness(validated_data[:guidelines])

  # Visual analysis confidence (only when visual guidelines exist)
  if validated_data[:visual_guidelines].present? && validated_data[:visual_guidelines].any?
    scores[:visual_confidence] = validated_data[:visual_guidelines][:visual_consistency] || 0.5
  end

  # Cross-validation score
  scores[:cross_validation] = validated_data[:validation_results][:overall_coherence] || 0.7

  weights = {
    content_volume: 0.15,
    voice_consistency: 0.20,
    value_confidence: 0.15,
    messaging_clarity: 0.15,
    guidelines_completeness: 0.15,
    visual_confidence: 0.10,
    cross_validation: 0.20
  }

  # Normalize by the weight of the aspects actually scored so the overall
  # stays within 0.0..1.0 whether or not visual confidence participates
  applicable_weight = scores.keys.sum { |key| weights[key] || 0 }
  weighted_total = scores.sum { |key, score| score * (weights[key] || 0) }
  overall_score = applicable_weight.positive? ? weighted_total / applicable_weight : 0.0

  {
    overall: overall_score.round(2),
    breakdown: scores,
    confidence_level: determine_confidence_level(overall_score),
    recommendations: generate_confidence_recommendations(scores)
  }
end
-
-
# Scores (0.0-1.0) how much raw material was available for analysis:
# a word-count tier plus a small bonus (capped at 0.2) for multiple
# content sources, clamped to 1.0.
def calculate_content_volume_score
  words = @content.split.size

  tier =
    if words <= 500 then 0.2
    elsif words <= 1000 then 0.4
    elsif words <= 3000 then 0.6
    elsif words <= 7000 then 0.8
    elsif words <= 15000 then 0.9
    else 1.0
    end

  sources = @content_sources&.size || 1
  bonus = [sources * 0.05, 0.2].min

  [tier + bonus, 1.0].min
end
-
-
# Confidence (0.0-1.0) in the extracted brand values: the mean score of the
# top five values plus up to 0.3 bonus for explicitly stated values.
# Returns a 0.3 floor when nothing was extracted.
def calculate_value_extraction_confidence(brand_values)
  return 0.3 if brand_values.empty?

  top = brand_values.first(5)
  mean_score = top.sum { |v| v[:score] } / top.size

  explicit_bonus = [brand_values.count { |v| v[:type] == :explicit } * 0.1, 0.3].min

  [mean_score + explicit_bonus, 1.0].min
end
-
-
# Scores messaging clarity as a 60/40 blend of average pillar strength and
# average pillar consistency, rounded to two decimals. Returns a 0.3 floor
# when no pillars exist.
#
# Fix: the original called `messaging_data[:pillars].any?` directly and
# raised NoMethodError when the :pillars key was absent; nil is now guarded
# alongside the empty case.
def calculate_messaging_clarity(messaging_data)
  pillars = messaging_data[:pillars]
  return 0.3 unless pillars&.any?

  avg_strength = pillars.sum { |p| p[:strength_score] } / pillars.size
  avg_consistency = pillars.sum { |p| p[:consistency_score] } / pillars.size

  (avg_strength * 0.6 + avg_consistency * 0.4).round(2)
end
-
-
# Scores guideline coverage: the fraction of the five known rule categories
# that contain at least one non-empty rule list, plus a bonus for having
# many individual rules, clamped to 1.0.
def calculate_guidelines_completeness(guidelines)
  categories = [:voice_tone_rules, :messaging_rules, :visual_rules, :grammar_style_rules, :behavioral_rules]

  populated = categories.select { |category|
    bucket = guidelines[category]
    bucket.present? && bucket.any? { |_, v| v.present? && v.any? }
  }

  rule_count = populated.sum { |category| guidelines[category].values.flatten.size }

  # Five known rule categories in total
  category_score = populated.size / 5.0

  rule_bonus =
    if rule_count <= 5 then 0
    elsif rule_count <= 15 then 0.1
    elsif rule_count <= 30 then 0.2
    else 0.3
    end

  [category_score + rule_bonus, 1.0].min
end
-
-
# Maps a 0.0-1.0 confidence score onto a human-readable label.
#
# Fix: the original Float ranges (0.75..0.89, 0.6..0.74, 0.4..0.59) left
# gaps — e.g. 0.895 or 0.745 matched no range and fell through to
# "Very Low". Threshold comparisons cover the whole scale, and scores
# above 1.0 also classify as "Very High" instead of falling through.
def determine_confidence_level(score)
  if score >= 0.9 then "Very High"
  elsif score >= 0.75 then "High"
  elsif score >= 0.6 then "Moderate"
  elsif score >= 0.4 then "Low"
  else "Very Low"
  end
end
-
-
# Maps each low-scoring aspect (< 0.6) to a canned improvement tip,
# preserving the iteration order of +scores+. Unknown aspects are skipped.
def generate_confidence_recommendations(scores)
  tips = {
    content_volume: "Upload more brand materials for comprehensive analysis",
    voice_consistency: "Review brand voice for consistency across materials",
    value_confidence: "Clarify and explicitly state core brand values",
    messaging_clarity: "Develop clearer messaging pillars and key messages",
    guidelines_completeness: "Create more comprehensive brand guidelines",
    visual_confidence: "Ensure visual assets follow consistent style",
    cross_validation: "Align voice, values, and messaging for coherence"
  }

  scores.each_with_object([]) do |(aspect, score), recommendations|
    recommendations << tips[aspect] if score < 0.6 && tips.key?(aspect)
  end
end
-
-
# Orchestrates guideline-record creation from a completed analysis. Each
# process_* helper appends persisted guideline records to the shared
# +guidelines+ array; high-priority rules not already covered are
# backfilled last. Returns the accumulated records.
def create_comprehensive_guidelines(analysis)
  guidelines = []

  # Process each category of rules
  process_voice_tone_guidelines(analysis, guidelines)
  process_messaging_guidelines(analysis, guidelines)
  process_visual_guidelines(analysis, guidelines)
  process_grammar_style_guidelines(analysis, guidelines)
  process_behavioral_guidelines(analysis, guidelines)

  # Create high-priority rules from rule_priorities
  if analysis.extracted_rules[:rule_priorities]
    create_priority_guidelines(analysis.extracted_rules[:rule_priorities], guidelines)
  end

  guidelines
end
-
-
# Persists voice/tone rules as brand guidelines. Rule strength maps to a
# base priority (must: 9, should: 7, must_not: 8) that decays by 0.1 per
# list position so earlier rules rank higher. Creation order mirrors the
# source lists: must_do, should_do, must_not_do.
def process_voice_tone_guidelines(analysis, guidelines)
  rules = analysis.extracted_rules[:voice_tone_rules] || {}

  rule_specs = [
    [:must_do, "must", 9, { source: "analysis", confidence: analysis.confidence_score }],
    [:should_do, "should", 7, { source: "analysis" }],
    [:must_not_do, "must_not", 8, { source: "analysis" }]
  ]

  rule_specs.each do |key, rule_type, base_priority, metadata|
    rules[key]&.each_with_index do |rule, index|
      guidelines << brand.brand_guidelines.create!(
        rule_type: rule_type,
        rule_content: rule,
        category: "voice",
        priority: base_priority - (index * 0.1),
        metadata: metadata
      )
    end
  end
end
-
-
# Persists messaging rules: one "must" per required element, one "should"
# bundling the key phrases, and one "must_not" per prohibited topic.
def process_messaging_guidelines(analysis, guidelines)
  rules = analysis.extracted_rules[:messaging_rules] || {}

  (rules[:required_elements] || []).each do |element|
    guidelines << brand.brand_guidelines.create!(
      rule_type: "must",
      rule_content: "Include: #{element}",
      category: "messaging",
      priority: 8.5,
      metadata: { element_type: "required" }
    )
  end

  phrases = rules[:key_phrases]
  if phrases&.any?
    guidelines << brand.brand_guidelines.create!(
      rule_type: "should",
      rule_content: "Use key phrases: #{phrases.join(', ')}",
      category: "messaging",
      priority: 7,
      metadata: { phrases: phrases }
    )
  end

  (rules[:prohibited_topics] || []).each do |topic|
    guidelines << brand.brand_guidelines.create!(
      rule_type: "must_not",
      rule_content: "Avoid discussing: #{topic}",
      category: "messaging",
      priority: 8,
      metadata: { topic_type: "prohibited" }
    )
  end
end
-
-
# Persists visual-identity rules (colors, typography, imagery do/don't).
#
# Fix: the original indexed visual[:typography][:fonts] and
# visual[:imagery][:do] directly, raising NoMethodError whenever the
# extracted rules omitted the :typography or :imagery sections; both are
# now defaulted to empty hashes before use.
def process_visual_guidelines(analysis, guidelines)
  visual = analysis.extracted_rules[:visual_rules] || {}
  typography = visual[:typography] || {}
  imagery = visual[:imagery] || {}

  # Color rules
  if visual[:colors]&.any? { |_, v| v.present? && v.any? }
    guidelines << brand.brand_guidelines.create!(
      rule_type: "must",
      rule_content: build_color_rule(visual[:colors]),
      category: "visual",
      priority: 9,
      metadata: { colors: visual[:colors] }
    )
  end

  # Typography rules
  if typography[:fonts]&.any?
    guidelines << brand.brand_guidelines.create!(
      rule_type: "must",
      rule_content: "Use fonts: #{typography[:fonts].join(', ')}",
      category: "visual",
      priority: 8.5,
      metadata: { typography: typography }
    )
  end

  # Imagery rules (top three entries of each list)
  if imagery[:do]&.any?
    guidelines << brand.brand_guidelines.create!(
      rule_type: "should",
      rule_content: "Image style: #{imagery[:style]}. #{imagery[:do].first(3).join('; ')}",
      category: "visual",
      priority: 7
    )
  end

  if imagery[:dont]&.any?
    guidelines << brand.brand_guidelines.create!(
      rule_type: "must_not",
      rule_content: "Avoid: #{imagery[:dont].first(3).join('; ')}",
      category: "visual",
      priority: 7.5
    )
  end
end
-
-
# Renders a sentence describing the palette, e.g.
# "Primary colors: #111. Secondary colors: #222, #333". Sections without
# colors are omitted; returns "" when neither list has entries.
def build_color_rule(colors)
  segments = []
  primary = colors[:primary]
  secondary = colors[:secondary]

  segments << "Primary colors: #{primary.join(', ')}" if primary&.any?
  segments << "Secondary colors: #{secondary.join(', ')}" if secondary&.any?

  segments.join('. ')
end
-
-
# Persists grammar/style rules: one combined "must" for punctuation,
# capitalization and formatting rules (first five), plus one "should"
# listing preferred-term substitutions.
def process_grammar_style_guidelines(analysis, guidelines)
  rules = analysis.extracted_rules[:grammar_style_rules] || {}

  if rules.any? { |_, v| v.present? && v.any? }
    style_rules = (rules[:punctuation] || []) +
                  (rules[:capitalization] || []) +
                  (rules[:formatting] || [])

    if style_rules.any?
      guidelines << brand.brand_guidelines.create!(
        rule_type: "must",
        rule_content: "Follow style rules: #{style_rules.first(5).join('; ')}",
        category: "grammar",
        priority: 7,
        metadata: { style_rules: rules }
      )
    end
  end

  terms = rules[:preferred_terms]
  if terms&.any?
    replacements = terms.map { |preferred, avoid| "Use '#{preferred}' instead of '#{avoid}'" }

    guidelines << brand.brand_guidelines.create!(
      rule_type: "should",
      rule_content: replacements.join('; '),
      category: "grammar",
      priority: 6.5,
      metadata: { terms: terms }
    )
  end
end
-
-
# Persists behavioral rules: a "must" per customer-interaction rule, one
# "should" bundling response patterns, and a high-priority "must" per
# ethical guideline.
def process_behavioral_guidelines(analysis, guidelines)
  rules = analysis.extracted_rules[:behavioral_rules] || {}

  (rules[:customer_interaction] || []).each do |rule|
    guidelines << brand.brand_guidelines.create!(
      rule_type: "must",
      rule_content: rule,
      category: "behavior",
      priority: 8,
      metadata: { interaction_type: "customer" }
    )
  end

  patterns = rules[:response_patterns]
  if patterns&.any?
    guidelines << brand.brand_guidelines.create!(
      rule_type: "should",
      rule_content: "Response approach: #{patterns.join('; ')}",
      category: "behavior",
      priority: 7
    )
  end

  (rules[:ethical_guidelines] || []).each do |guideline|
    guidelines << brand.brand_guidelines.create!(
      rule_type: "must",
      rule_content: guideline,
      category: "behavior",
      priority: 9,
      metadata: { guideline_type: "ethical" }
    )
  end
end
-
-
# Backfills "must" guidelines for high-importance extracted rules
# (importance >= 8) whose text is not already covered by a previously
# created guideline (case-insensitive substring check).
def create_priority_guidelines(priorities, guidelines)
  priorities.each do |priority_rule|
    next if priority_rule[:importance] < 8

    rule_text = priority_rule[:rule].downcase
    next if guidelines.any? { |g| g.rule_content.downcase.include?(rule_text) }

    guidelines << brand.brand_guidelines.create!(
      rule_type: "must",
      rule_content: priority_rule[:rule],
      category: priority_rule[:category] || "general",
      priority: priority_rule[:importance],
      metadata: {
        consequences: priority_rule[:consequences],
        source: "high_priority_analysis"
      }
    )
  end
end
-
-
# Rebuilds the brand's messaging framework record from a completed
# analysis: tone data, structured key messages, evidence-backed value
# propositions, audience insights, differentiators, and generated
# promise/pitch copy. Returns the persisted framework.
def update_messaging_framework_detailed(analysis)
  framework = brand.messaging_framework || brand.build_messaging_framework

  # Extract comprehensive tone data
  tone_data = {
    primary: analysis.voice_attributes[:tone][:primary],
    secondary: analysis.voice_attributes[:tone][:secondary],
    avoided: analysis.voice_attributes[:tone][:avoided],
    emotional_tone: analysis.voice_attributes[:emotional_tone],
    consistency: analysis.voice_attributes[:tone][:consistency]
  }

  # Build structured key messages from pillars
  key_messages = build_structured_key_messages(analysis.messaging_pillars)

  # Create value propositions with evidence
  value_props = build_evidence_based_value_propositions(analysis)

  # Update framework with comprehensive data (raises on validation failure)
  framework.update!(
    tone_attributes: tone_data,
    key_messages: key_messages,
    value_propositions: value_props,
    audience_personas: extract_audience_insights(analysis),
    differentiation_points: extract_differentiators(analysis),
    brand_promise: generate_brand_promise(analysis),
    elevator_pitch: generate_elevator_pitch(analysis)
  )

  framework
end
-
-
# Converts messaging pillars into the key_messages hash stored on the
# messaging framework: one entry per pillar name plus a :hierarchy entry
# carrying the pillar ranking. Returns {} when no pillars exist.
def build_structured_key_messages(messaging_pillars)
  pillars = messaging_pillars[:pillars]
  return {} unless pillars.present?

  structured = pillars.each_with_object({}) do |pillar, acc|
    acc[pillar[:name]] = {
      core_message: pillar[:description],
      supporting_points: pillar[:key_messages] || [],
      proof_points: pillar[:supporting_points] || [],
      emotional_goal: pillar[:target_emotion],
      usage_contexts: determine_usage_contexts(pillar)
    }
  end

  # Keep the ranking alongside the per-pillar entries
  structured[:hierarchy] = messaging_pillars[:pillar_hierarchy]

  structured
end
-
-
# Assembles value propositions backed by extracted evidence: a core
# proposition from the top three values, one supporting prop per value,
# plus proof points and competitive advantages.
# NOTE(review): assumes each top value carries at least one :contexts
# entry — value[:contexts].first interpolates nil otherwise; confirm the
# extractor guarantees this.
def build_evidence_based_value_propositions(analysis)
  primary_values = analysis.brand_values.first(3)

  {
    core_value_prop: generate_core_value_proposition(primary_values, analysis.messaging_pillars),
    supporting_props: primary_values.map { |value|
      {
        value: value[:name],
        proposition: "We deliver #{value[:name].downcase} through #{value[:contexts].first}",
        evidence: value[:evidence],
        strength: value[:score]
      }
    },
    proof_points: extract_proof_points(analysis),
    competitive_advantages: identify_competitive_advantages(analysis)
  }
end
-
-
# Composes a one-sentence value proposition from the given value names and
# the first (primary) messaging pillar.
def generate_core_value_proposition(values, pillars)
  names = values.map { |v| v[:name] }.join(', ')
  lead_pillar = pillars[:pillars].first

  "We deliver #{names} by #{lead_pillar[:description].downcase}, "\
  "enabling #{lead_pillar[:target_emotion] || 'success'} for our customers."
end
-
-
# Derives audience-facing insights implied by the brand's voice and
# messaging: communication preferences, value names, emotional drivers,
# and a sophistication label.
def extract_audience_insights(analysis)
  # Extract implied audience characteristics from voice and messaging
  {
    communication_preferences: determine_audience_preferences(analysis.voice_attributes),
    value_alignment: analysis.brand_values.map { |v| v[:name] },
    emotional_drivers: extract_emotional_drivers(analysis.messaging_pillars),
    sophistication_level: determine_audience_sophistication(analysis.voice_attributes)
  }
end
-
-
# Infers audience communication preferences from the brand's formality
# level and writing style. Formality yields the base preferences; a
# technical or storytelling writing style appends two more.
def determine_audience_preferences(voice_attrs)
  preferences =
    case voice_attrs[:formality][:level]
    when 'very_formal', 'formal'
      ["Professional communication", "Detailed information"]
    when 'casual', 'very_casual'
      ["Conversational tone", "Quick, digestible content"]
    else
      ["Balanced communication style"]
    end

  case voice_attrs[:style][:writing]
  when 'technical'
    preferences += ["Data-driven insights", "Specific details"]
  when 'storytelling'
    preferences += ["Narrative examples", "Relatable scenarios"]
  end

  preferences
end
-
-
# Unique target emotions declared across pillars; falls back to a default
# trio when no pillar declares one.
def extract_emotional_drivers(messaging_pillars)
  emotions = (messaging_pillars[:pillars] || []).map { |p| p[:target_emotion] }.compact.uniq
  emotions.presence || ['trust', 'confidence', 'success']
end
-
-
# Maps the brand's vocabulary complexity onto an audience sophistication
# label; unrecognized levels default to the general-audience label.
def determine_audience_sophistication(voice_attrs)
  labels = {
    'advanced' => 'High - Expert level',
    'technical' => 'High - Expert level',
    'intermediate' => 'Medium - Professional level'
  }

  labels.fetch(voice_attrs[:style][:vocabulary], 'Accessible - General audience')
end
-
-
# Collects up to five differentiation points: pillars whose wording claims
# uniqueness ("unique"/"different" in the name, "only" in the description)
# plus explicitly stated values scoring above 0.8.
def extract_differentiators(analysis)
  differentiators = []

  analysis.messaging_pillars[:pillars].each do |pillar|
    lowered_name = pillar[:name].downcase
    claims_uniqueness = lowered_name.include?('unique') ||
                        lowered_name.include?('different') ||
                        pillar[:description].downcase.include?('only')
    next unless claims_uniqueness

    differentiators << {
      point: pillar[:name],
      description: pillar[:description],
      evidence: pillar[:supporting_points]
    }
  end

  analysis.brand_values.each do |value|
    next unless value[:score] > 0.8 && value[:type] == :explicit

    differentiators << {
      point: "#{value[:name]} Leadership",
      description: "Demonstrated commitment to #{value[:name].downcase}",
      evidence: value[:evidence]
    }
  end

  differentiators.first(5)
end
-
-
# Composes a one-sentence brand promise from the top-ranked value and the
# primary (first) messaging pillar.
def generate_brand_promise(analysis)
  leading_value = analysis.brand_values.first[:name]
  lead_pillar = analysis.messaging_pillars[:pillars].first

  "We promise to deliver #{leading_value.downcase} through #{lead_pillar[:description].downcase}, "\
  "ensuring #{lead_pillar[:target_emotion] || 'exceptional outcomes'} in every interaction."
end
-
-
# Builds a short elevator pitch from the top two values, the first two
# messaging pillars, and the dominant emotional tone.
def generate_elevator_pitch(analysis)
  values = analysis.brand_values.first(2).map { |v| v[:name] }.join(' and ')
  primary, secondary = analysis.messaging_pillars[:pillars].first(2)
  emotion = analysis.voice_attributes[:emotional_tone][:primary_emotion] || 'positive'

  pitch = "We are committed to #{values.downcase}, #{primary[:description].downcase}. "
  pitch << "We also #{secondary[:description].downcase}, " if secondary
  pitch << "delivering #{emotion} "
  pitch << "experiences that #{primary[:key_messages].first&.downcase || 'drive results'}."
  pitch
end
-
-
# Guesses where a pillar's message is most useful based on keywords in its
# name and description; defaults to general communications when nothing
# matches.
def determine_usage_contexts(pillar)
  keywords = (pillar[:name] + ' ' + pillar[:description]).downcase

  context_keywords = {
    "Sales conversations" => %w[value benefit],
    "Marketing materials" => %w[brand story],
    "Customer support" => %w[help support],
    "Product descriptions" => %w[feature capability],
    "Executive communications" => %w[vision leadership]
  }

  contexts = context_keywords.select { |_, words|
    words.any? { |word| keywords.include?(word) }
  }.keys

  contexts.presence || ["General communications"]
end
-
-
# Gathers claim/proof pairs from pillar supporting points and value
# evidence, returning the ten strongest (by strength score, descending).
def extract_proof_points(analysis)
  points = []

  analysis.messaging_pillars[:pillars].each do |pillar|
    (pillar[:supporting_points] || []).each do |support|
      points << { claim: pillar[:name], proof: support, strength: pillar[:strength_score] }
    end
  end

  analysis.brand_values.each do |value|
    (value[:evidence] || []).each do |item|
      points << { claim: value[:name], proof: item, strength: value[:score] }
    end
  end

  points.sort_by { |point| -point[:strength] }.first(10)
end
-
-
# Finds up to five advantage claims: pillar key messages containing
# superlatives, plus top-scoring (> 0.85) explicitly stated values.
def identify_competitive_advantages(analysis)
  advantages = []

  analysis.messaging_pillars[:pillars].each do |pillar|
    (pillar[:key_messages] || []).each do |message|
      advantages << message if message =~ /best|first|only|unique|leading|superior/i
    end
  end

  analysis.brand_values.each do |value|
    if value[:score] > 0.85 && value[:type] == :explicit
      advantages << "Industry-leading commitment to #{value[:name].downcase}"
    end
  end

  advantages.uniq.first(5)
end
-
-
# Appends a percentage summary of the various consistency scores to the
# analysis record's notes. Nil scores are skipped via the compact.
def generate_brand_consistency_report(analysis)
  # This could be expanded to create a detailed consistency report
  # For now, we'll add it to the analysis notes

  consistency_data = {
    voice_consistency: analysis.voice_attributes[:consistency_score],
    value_alignment: analysis.analysis_data.dig('validation_results', 'value_pillar_alignment', 'score'),
    tone_consistency: analysis.analysis_data.dig('validation_results', 'tone_consistency', 'score'),
    rule_consistency: analysis.extracted_rules[:rule_consistency],
    visual_consistency: analysis.visual_guidelines[:visual_consistency],
    overall_coherence: analysis.analysis_data.dig('validation_results', 'overall_coherence')
  }

  # "Aspect name: 87%" pairs for every score that is present
  report_summary = consistency_data.map { |aspect, score|
    "#{aspect.to_s.humanize}: #{(score * 100).round}%" if score
  }.compact.join(', ')

  analysis.update!(
    analysis_notes: (analysis.analysis_notes || '') + "\n\nConsistency Report: #{report_summary}"
  )
end
-
-
# Memoized LLM client used for all analysis calls; configured from the
# instance's provider and an optional :temperature option (default 0.7).
def llm_service
  @llm_service ||= LlmService.new(
    model: @llm_provider,
    temperature: @options[:temperature] || 0.7
  )
end
-
end
-
end
-
module Branding
  # Extracts text and metadata from an uploaded BrandAsset attachment
  # (PDF, document, image, or ZIP archive) and, for textual content,
  # queues asynchronous AI brand analysis.
  #
  # Fixes vs. previous version:
  # * PDF/DOCX text was accumulated with `text += ...`, reallocating the
  #   whole string per page/paragraph (quadratic on large files); text is
  #   now collected into an array and joined once.
  # * An unsupported file type returned false but left the asset stuck in
  #   the "processing" state; it is now marked failed.
  class AssetProcessor
    attr_reader :brand_asset, :errors

    def initialize(brand_asset)
      @brand_asset = brand_asset
      @errors = []
    end

    # Processes the attachment end-to-end. Returns true on success; on any
    # failure records an error message, marks the asset failed, and
    # returns false.
    def process
      return false unless brand_asset.file.attached?

      brand_asset.mark_as_processing!

      begin
        case determine_asset_type
        when :pdf
          process_pdf
        when :document
          process_document
        when :image
          process_image
        when :archive
          process_archive
        else
          message = "Unsupported file type: #{brand_asset.content_type}"
          add_error(message)
          # Don't leave the asset stuck in "processing" when we bail out
          brand_asset.mark_as_failed!(message)
          return false
        end

        brand_asset.mark_as_completed!
        true
      rescue StandardError => e
        add_error("Processing failed: #{e.message}")
        brand_asset.mark_as_failed!(e.message)
        false
      end
    end

    private

    # Classifies the attachment; nil means unsupported.
    def determine_asset_type
      return :pdf if brand_asset.content_type == "application/pdf"
      return :document if brand_asset.document?
      return :image if brand_asset.image?
      return :archive if brand_asset.archive?
      nil
    end

    # Stores extracted PDF text plus document metadata, then queues analysis.
    def process_pdf
      text = extract_pdf_text
      metadata = extract_pdf_metadata

      brand_asset.update!(
        extracted_text: text,
        extracted_data: {
          page_count: metadata[:page_count],
          title: metadata[:title],
          author: metadata[:author],
          creation_date: metadata[:creation_date]
        }
      )

      analyze_brand_content(text)
    end

    # Concatenates the text of every PDF page (newline-separated, stripped).
    def extract_pdf_text
      pages = []

      brand_asset.file.blob.open do |file|
        reader = PDF::Reader.new(file)
        reader.pages.each { |page| pages << page.text }
      end

      pages.join("\n").strip
    end

    # Reads page count and the PDF info dictionary fields we store.
    def extract_pdf_metadata
      metadata = {}

      brand_asset.file.blob.open do |file|
        reader = PDF::Reader.new(file)
        metadata[:page_count] = reader.page_count
        metadata[:title] = reader.info[:Title]
        metadata[:author] = reader.info[:Author]
        metadata[:creation_date] = reader.info[:CreationDate]
      end

      metadata
    end

    # Stores extracted document text plus word/character counts, then
    # queues analysis.
    def process_document
      text = extract_document_text

      brand_asset.update!(
        extracted_text: text,
        extracted_data: {
          word_count: text.split.size,
          character_count: text.length
        }
      )

      analyze_brand_content(text)
    end

    # Dispatches on content type; unknown document types yield "".
    def extract_document_text
      case brand_asset.content_type
      when "text/plain"
        extract_plain_text
      when "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
        extract_docx_text
      else
        ""
      end
    end

    def extract_plain_text
      brand_asset.file.download
    end

    # Concatenates DOCX paragraphs (newline-separated, stripped).
    def extract_docx_text
      paragraphs = []

      brand_asset.file.blob.open do |file|
        doc = Docx::Document.open(file)
        doc.paragraphs.each { |p| paragraphs << p.to_s }
      end

      paragraphs.join("\n").strip
    end

    # Stores image dimensions/format. No text, so no analysis is queued.
    def process_image
      metadata = extract_image_metadata

      brand_asset.update!(
        extracted_data: {
          width: metadata[:width],
          height: metadata[:height],
          format: metadata[:format],
          color_profile: metadata[:color_profile],
          dominant_colors: extract_dominant_colors
        }
      )

      # For logos and visual assets, we might want to run through image
      # recognition or extract color palettes for brand consistency
    end

    # Reads width/height/format via ActiveStorage analysis (triggered
    # lazily if the blob has not been analyzed yet).
    def extract_image_metadata
      metadata = {}

      brand_asset.file.blob.analyze unless brand_asset.file.blob.analyzed?

      metadata[:width] = brand_asset.file.blob.metadata[:width]
      metadata[:height] = brand_asset.file.blob.metadata[:height]
      metadata[:format] = brand_asset.file.blob.content_type

      metadata
    end

    # Placeholder — in production this would use ImageMagick or a color
    # extraction library.
    def extract_dominant_colors
      []
    end

    # Indexes the files inside a ZIP archive (name, size, coarse type)
    # without extracting their contents.
    def process_archive
      extracted_files = []

      brand_asset.file.blob.open do |file|
        Zip::File.open(file) do |zip_file|
          zip_file.each do |entry|
            next if entry.directory?

            extracted_files << {
              name: entry.name,
              size: entry.size,
              type: determine_file_type(entry.name)
            }
          end
        end
      end

      brand_asset.update!(
        extracted_data: {
          file_count: extracted_files.size,
          files: extracted_files
        }
      )
    end

    # Coarse file classification by extension.
    def determine_file_type(filename)
      case File.extname(filename).downcase
      when '.pdf' then 'pdf'
      when '.doc', '.docx' then 'document'
      when '.txt' then 'text'
      when '.jpg', '.jpeg', '.png', '.gif' then 'image'
      else 'other'
      end
    end

    # Queues asynchronous AI analysis for non-blank extracted text.
    def analyze_brand_content(text)
      return if text.blank?

      BrandAnalysisJob.perform_later(brand_asset.brand, text)
    end

    def add_error(message)
      @errors << message
    end
  end
end
-
module Branding
  module Compliance
    # Shared plumbing for compliance validators: accumulates violations and
    # suggestions, optionally broadcasts violations in real time over
    # ActionCable, and offers content-addressed caching helpers.
    class BaseValidator
      attr_reader :brand, :content, :options, :violations, :suggestions

      def initialize(brand, content, options = {})
        @brand = brand
        @content = content
        @options = options
        @violations = []
        @suggestions = []
      end

      # Subclass contract: inspect +content+ and populate @violations /
      # @suggestions.
      def validate
        raise NotImplementedError, "Subclasses must implement validate method"
      end

      protected

      # Records a violation; pushes it over ActionCable when the caller
      # requested real-time feedback via options[:real_time].
      def add_violation(type:, severity:, message:, details: {}, rule_id: nil)
        entry = {
          validator: self.class.name.demodulize.underscore,
          type: type,
          severity: severity.to_s,
          message: message,
          details: details,
          rule_id: rule_id,
          timestamp: Time.current,
          position: detect_position(details)
        }

        violations << entry
        broadcast_violation(entry) if options[:real_time]
      end

      # Records a remediation suggestion (no broadcast).
      def add_suggestion(type:, message:, details: {}, priority: "medium", rule_id: nil)
        suggestions << {
          validator: self.class.name.demodulize.underscore,
          type: type,
          message: message,
          details: details,
          priority: priority,
          rule_id: rule_id,
          timestamp: Time.current
        }
      end

      # Locates the offending text inside the content when details carry
      # it; nil when absent or not found.
      def detect_position(details)
        snippet = details[:text]
        return unless snippet.present?

        start_index = content.index(snippet)
        { start: start_index, end: start_index + snippet.length } if start_index
      end

      def broadcast_violation(violation)
        payload = { event: "violation_detected", violation: violation }
        ActionCable.server.broadcast("brand_compliance_#{brand.id}", payload)
      end

      # Cache key scoped to validator class, brand, and a short digest of
      # the content; an optional suffix namespaces sub-results.
      def cache_key(suffix = nil)
        parts = [
          "compliance",
          self.class.name.underscore,
          brand.id,
          Digest::MD5.hexdigest(content.to_s)[0..10]
        ]
        parts << suffix if suffix
        parts.join(":")
      end

      # Fetch-or-compute wrapper around Rails.cache keyed via #cache_key.
      def cached_result(key, expires_in: 5.minutes, &block)
        Rails.cache.fetch(cache_key(key), expires_in: expires_in, &block)
      end

      # Numeric weight for aggregating violation severities; unknown
      # severities weigh 0.4.
      def severity_weight(severity)
        weights = { "critical" => 1.0, "high" => 0.8, "medium" => 0.5, "low" => 0.3 }
        weights.fetch(severity.to_s, 0.4)
      end
    end
  end
end
-
module Branding
-
module Compliance
-
class CacheService
-
DEFAULT_EXPIRATION = 1.hour
-
RULE_EXPIRATION = 6.hours
-
RESULT_EXPIRATION = 30.minutes
-
-
class << self
-
# Single point of access to the backing store (Rails.cache).
def cache_store
  Rails.cache
end
-
-
# Rule caching methods
-
# Rule caching methods

# Caches the compiled rule set for a brand (optionally per category).
def cache_rules(brand_id, rules, category = nil)
  cache_store.write(rule_cache_key(brand_id, category), rules, expires_in: RULE_EXPIRATION)
end

def get_cached_rules(brand_id, category = nil)
  cache_store.read(rule_cache_key(brand_id, category))
end

# Drops every cached rule entry for the brand.
#
# Fix: the wildcard pattern "compliance:rules:<brand>:*" only matches
# per-category keys; the category-less key ("compliance:rules:<brand>",
# written by cache_rules with no category) has no trailing segment and
# survived invalidation. It is now deleted explicitly.
def invalidate_rules(brand_id)
  cache_store.delete(rule_cache_key(brand_id))
  delete_matching(rule_cache_pattern(brand_id))
end
-
-
# Result caching methods
-
# Result caching methods

# Stores one validator's result for a given brand + content digest.
def cache_validation_result(brand_id, content_hash, validator_type, result)
  key = result_cache_key(brand_id, content_hash, validator_type)
  cache_store.write(key, result, expires_in: RESULT_EXPIRATION)
end

def get_cached_validation_result(brand_id, content_hash, validator_type)
  cache_store.read(result_cache_key(brand_id, content_hash, validator_type))
end
-
-
# Analysis caching methods
-
# Analysis caching methods

# Stores an analysis payload with a TTL chosen per analysis type.
def cache_analysis(brand_id, content_hash, analysis_type, data)
  cache_store.write(
    analysis_cache_key(brand_id, content_hash, analysis_type),
    data,
    expires_in: analysis_expiration(analysis_type)
  )
end

def get_cached_analysis(brand_id, content_hash, analysis_type)
  cache_store.read(analysis_cache_key(brand_id, content_hash, analysis_type))
end
-
-
# Suggestion caching methods
-
# Suggestion caching methods

# Stores the suggestions generated for a particular violation digest.
def cache_suggestions(brand_id, violation_hash, suggestions)
  key = suggestion_cache_key(brand_id, violation_hash)
  cache_store.write(key, suggestions, expires_in: DEFAULT_EXPIRATION)
end

def get_cached_suggestions(brand_id, violation_hash)
  cache_store.read(suggestion_cache_key(brand_id, violation_hash))
end
-
-
# Batch operations
-
def preload_brand_cache(brand)
-
# Preload frequently accessed data
-
preload_rules(brand)
-
preload_guidelines(brand)
-
preload_analysis_data(brand)
-
end
-
-
def clear_brand_cache(brand_id)
-
patterns = [
-
rule_cache_pattern(brand_id),
-
result_cache_pattern(brand_id),
-
analysis_cache_pattern(brand_id),
-
suggestion_cache_pattern(brand_id)
-
]
-
-
patterns.each { |pattern| delete_matching(pattern) }
-
end
-
-
# Statistics and monitoring
-
def cache_statistics(brand_id)
-
{
-
rules_cached: count_matching(rule_cache_pattern(brand_id)),
-
results_cached: count_matching(result_cache_pattern(brand_id)),
-
analyses_cached: count_matching(analysis_cache_pattern(brand_id)),
-
suggestions_cached: count_matching(suggestion_cache_pattern(brand_id)),
-
total_size: estimate_cache_size(brand_id)
-
}
-
end
-
-
private
-
-
def rule_cache_key(brand_id, category = nil)
-
parts = ["compliance", "rules", brand_id]
-
parts << category if category
-
parts.join(":")
-
end
-
-
def rule_cache_pattern(brand_id)
-
"compliance:rules:#{brand_id}:*"
-
end
-
-
def result_cache_key(brand_id, content_hash, validator_type)
-
["compliance", "result", brand_id, content_hash, validator_type].join(":")
-
end
-
-
def result_cache_pattern(brand_id)
-
"compliance:result:#{brand_id}:*"
-
end
-
-
def analysis_cache_key(brand_id, content_hash, analysis_type)
-
["compliance", "analysis", brand_id, content_hash, analysis_type].join(":")
-
end
-
-
def analysis_cache_pattern(brand_id)
-
"compliance:analysis:#{brand_id}:*"
-
end
-
-
def suggestion_cache_key(brand_id, violation_hash)
-
["compliance", "suggestions", brand_id, violation_hash].join(":")
-
end
-
-
def suggestion_cache_pattern(brand_id)
-
"compliance:suggestions:#{brand_id}:*"
-
end
-
-
def analysis_expiration(analysis_type)
-
case analysis_type.to_s
-
when "tone", "sentiment"
-
2.hours # These change less frequently
-
when "readability", "keyword_density"
-
1.hour
-
else
-
DEFAULT_EXPIRATION
-
end
-
end
-
-
def delete_matching(pattern)
-
if cache_store.respond_to?(:delete_matched)
-
cache_store.delete_matched(pattern)
-
else
-
# Fallback for cache stores that don't support pattern deletion
-
Rails.logger.warn "Cache store doesn't support delete_matched"
-
end
-
end
-
-
def count_matching(pattern)
-
if cache_store.respond_to?(:keys)
-
cache_store.keys(pattern).count
-
else
-
0
-
end
-
end
-
-
def estimate_cache_size(brand_id)
-
# This is an estimate - actual implementation depends on cache store
-
patterns = [
-
rule_cache_pattern(brand_id),
-
result_cache_pattern(brand_id),
-
analysis_cache_pattern(brand_id),
-
suggestion_cache_pattern(brand_id)
-
]
-
-
total_keys = patterns.sum { |pattern| count_matching(pattern) }
-
# Estimate 1KB average per cached item
-
"~#{total_keys}KB"
-
end
-
-
def preload_rules(brand)
-
# Load and cache all active rules
-
rule_engine = RuleEngine.new(brand)
-
categories = %w[content style visual messaging legal]
-
-
categories.each do |category|
-
rules = rule_engine.get_rules_for_category(category)
-
cache_rules(brand.id, rules, category) if rules.any?
-
end
-
end
-
-
def preload_guidelines(brand)
-
# Cache frequently accessed guidelines
-
guidelines_by_category = brand.brand_guidelines.active.group_by(&:category)
-
-
guidelines_by_category.each do |category, guidelines|
-
key = ["compliance", "guidelines", brand.id, category].join(":")
-
cache_store.write(key, guidelines.map(&:attributes), expires_in: RULE_EXPIRATION)
-
end
-
end
-
-
def preload_analysis_data(brand)
-
# Cache brand analysis data
-
if latest_analysis = brand.latest_analysis
-
key = ["compliance", "brand_analysis", brand.id].join(":")
-
cache_store.write(key, {
-
voice_attributes: latest_analysis.voice_attributes,
-
sentiment_profile: latest_analysis.sentiment_profile,
-
keywords: latest_analysis.keywords,
-
emotional_targets: latest_analysis.emotional_targets
-
}, expires_in: 6.hours)
-
end
-
end
-
end
-
-
# Instance methods for request-scoped caching
-
def initialize
-
@request_cache = {}
-
end
-
-
def fetch(key, &block)
-
@request_cache[key] ||= block.call
-
end
-
-
def clear
-
@request_cache.clear
-
end
-
end
-
end
-
end
-
module Branding
  module Compliance
    # Pushes compliance lifecycle events over ActionCable to the brand-wide,
    # session-scoped and user-scoped channels that apply to this broadcaster.
    # Broadcast failures are logged, never raised.
    class EventBroadcaster
      attr_reader :brand_id, :session_id, :user_id

      # session_id / user_id are optional; when absent, the corresponding
      # channel simply receives no events.
      def initialize(brand_id, session_id = nil, user_id = nil)
        @brand_id = brand_id
        @session_id = session_id
        @user_id = user_id
      end

      # Announce that a validation run has begun.
      def broadcast_validation_start(content_info = {})
        payload = {
          content_type: content_info[:type],
          content_length: content_info[:length],
          validators: content_info[:validators]
        }
        broadcast_event("validation_started", payload)
      end

      # Report incremental progress (0.0..1.0) for a single validator.
      def broadcast_validator_progress(validator_name, progress)
        state = progress >= 1.0 ? "completed" : "in_progress"
        broadcast_event("validator_progress", {
          validator: validator_name,
          progress: progress,
          status: state
        })
      end

      # Emit a single detected violation (sanitized to a stable shape).
      def broadcast_violation_detected(violation)
        payload = { violation: sanitize_violation(violation), timestamp: Time.current }
        broadcast_event("violation_detected", payload)
      end

      # Emit a single generated suggestion (sanitized, description truncated).
      def broadcast_suggestion_generated(suggestion)
        payload = { suggestion: sanitize_suggestion(suggestion), timestamp: Time.current }
        broadcast_event("suggestion_generated", payload)
      end

      # Emit the summary of a finished validation run.
      def broadcast_validation_complete(results)
        payload = {
          compliant: results[:compliant],
          score: results[:score],
          violations_count: results[:violations]&.count || 0,
          suggestions_count: results[:suggestions]&.count || 0,
          processing_time: results[:metadata]&.dig(:processing_time),
          summary: results[:summary]
        }
        broadcast_event("validation_complete", payload)
      end

      # Emit confirmation that an automated fix was applied.
      def broadcast_fix_applied(fix_info)
        payload = {
          violation_id: fix_info[:violation_id],
          fix_type: fix_info[:fix_type],
          confidence: fix_info[:confidence],
          preview: truncate_content(fix_info[:preview])
        }
        broadcast_event("fix_applied", payload)
      end

      # Emit an error notification for the current run.
      def broadcast_error(error_info)
        payload = {
          error_type: error_info[:type],
          message: error_info[:message],
          recoverable: error_info[:recoverable]
        }
        broadcast_event("validation_error", payload)
      end

      private

      # Fan the event out to every applicable channel; failures are logged
      # and swallowed so broadcasting never breaks the caller.
      def broadcast_event(event_type, data)
        determine_channels.each do |channel|
          ActionCable.server.broadcast(channel, {
            event: event_type,
            data: data,
            metadata: event_metadata
          })
        end
      rescue StandardError => e
        Rails.logger.error "Failed to broadcast compliance event: #{e.message}"
      end

      # Brand channel always; session/user channels only when ids are present.
      def determine_channels
        ["brand_compliance_#{brand_id}"].tap do |channels|
          channels << "compliance_session_#{session_id}" if session_id
          channels << "user_compliance_#{user_id}" if user_id
        end
      end

      # Common envelope attached to every event.
      def event_metadata
        {
          brand_id: brand_id,
          session_id: session_id,
          user_id: user_id,
          timestamp: Time.current.iso8601,
          server_time: Time.current.to_f
        }
      end

      # Reduce a violation to the client-safe fields.
      def sanitize_violation(violation)
        {
          id: violation[:id],
          type: violation[:type],
          severity: violation[:severity],
          message: violation[:message],
          validator: violation[:validator_type],
          position: violation[:position]
        }
      end

      # Reduce a suggestion to the client-safe fields.
      def sanitize_suggestion(suggestion)
        {
          type: suggestion[:type],
          priority: suggestion[:priority],
          title: suggestion[:title],
          description: truncate_content(suggestion[:description]),
          effort_level: suggestion[:effort_level]
        }
      end

      # Cap long strings at max_length characters plus an ellipsis;
      # nil and short strings pass through unchanged.
      def truncate_content(content, max_length = 200)
        if content && content.length > max_length
          "#{content[0...max_length]}..."
        else
          content
        end
      end
    end
  end
end
-
module Branding
  module Compliance
    # NLP-driven brand-compliance validator. Runs LLM-backed and local
    # heuristic analyses over the content, memoizes each per aspect, and
    # records violations/suggestions via BaseValidator helpers.
    class NlpAnalyzer < BaseValidator
      # Every aspect analyze_aspect knows how to compute.
      ANALYSIS_TYPES = %i[
        tone sentiment readability brand_alignment
        keyword_density emotion style coherence
      ].freeze

      # @param brand   [Brand]  brand whose voice/guidelines drive the checks
      # @param content [String] content under analysis
      # @param options [Hash]   :llm_service may inject an LLM client (test seam)
      def initialize(brand, content, options = {})
        super
        @llm_service = options[:llm_service] || LlmService.new
        @analysis_cache = {}
      end

      # Run every analysis, then every compliance check, and return the
      # accumulated violations, suggestions and raw per-aspect analysis data.
      # NOTE(review): @violations/@suggestions appear to be initialized by
      # BaseValidator (add_violation/add_suggestion live there) — confirm.
      def validate
        analyze_all_aspects

        # Check tone compliance
        check_tone_compliance

        # Check sentiment alignment
        check_sentiment_alignment

        # Check readability standards
        check_readability_standards

        # Check brand voice alignment
        check_brand_voice_alignment

        # Check messaging consistency
        check_messaging_consistency

        # Analyze emotional resonance
        check_emotional_resonance

        # Check style consistency
        check_style_consistency

        { violations: @violations, suggestions: @suggestions, analysis: @analysis_cache }
      end
-
-
# Compute (or return the memoized) analysis for a single aspect.
# Raises ArgumentError for aspects outside the known vocabulary.
def analyze_aspect(aspect_type)
  memoized = @analysis_cache[aspect_type]
  return memoized if memoized

  handlers = {
    tone: :analyze_tone,
    sentiment: :analyze_sentiment,
    readability: :analyze_readability,
    brand_alignment: :analyze_brand_alignment,
    keyword_density: :analyze_keyword_density,
    emotion: :analyze_emotion,
    style: :analyze_style,
    coherence: :analyze_coherence
  }
  handler = handlers[aspect_type]
  raise ArgumentError, "Unknown analysis type: #{aspect_type}" unless handler

  @analysis_cache[aspect_type] = send(handler)
end
-
-
private

# Eagerly compute every known analysis; results memoize via analyze_aspect.
def analyze_all_aspects
  ANALYSIS_TYPES.each { |type| analyze_aspect(type) }
end

# LLM-backed tone assessment.
# NOTE(review): cached_result is not defined in this excerpt — presumably a
# BaseValidator caching helper keyed by name; confirm.
def analyze_tone
  cached_result("tone_analysis") do
    prompt = build_tone_analysis_prompt

    response = @llm_service.analyze(prompt, {
      json_response: true,
      temperature: 0.3,
      system_message: "You are an expert content analyst specializing in tone and voice analysis."
    })

    parse_json_response(response) || default_tone_analysis
  end
end

# LLM-backed sentiment assessment.
def analyze_sentiment
  cached_result("sentiment_analysis") do
    prompt = build_sentiment_analysis_prompt

    response = @llm_service.analyze(prompt, {
      json_response: true,
      temperature: 0.2
    })

    parse_json_response(response) || default_sentiment_analysis
  end
end

# Purely local readability metrics (no LLM round-trip).
def analyze_readability
  cached_result("readability_analysis") do
    # Calculate various readability metrics
    {
      flesch_kincaid_score: calculate_flesch_kincaid,
      gunning_fog_index: calculate_gunning_fog,
      average_sentence_length: calculate_average_sentence_length,
      average_word_length: calculate_average_word_length,
      complex_word_percentage: calculate_complex_word_percentage,
      readability_grade: determine_readability_grade
    }
  end
end

# LLM comparison of the content against brand voice and key messages.
def analyze_brand_alignment
  cached_result("brand_alignment_analysis") do
    prompt = build_brand_alignment_prompt

    response = @llm_service.analyze(prompt, {
      json_response: true,
      temperature: 0.4,
      max_tokens: 1500
    })

    parse_json_response(response) || default_brand_alignment
  end
end

# Local keyword-frequency scan against the brand's keyword list.
# NOTE(review): when content_words is empty the density division yields
# NaN and .round raises — confirm callers guarantee non-empty content.
def analyze_keyword_density
  cached_result("keyword_density_analysis") do
    keywords = extract_brand_keywords
    content_words = tokenize_content

    density_map = {}
    keywords.each do |keyword|
      count = content_words.count { |word| word.downcase == keyword.downcase }
      density = (count.to_f / content_words.length * 100).round(2)
      density_map[keyword] = {
        count: count,
        density: density,
        optimal_range: determine_optimal_density(keyword)
      }
    end

    {
      keyword_densities: density_map,
      total_keywords: keywords.length,
      content_length: content_words.length
    }
  end
end

# LLM-backed emotional-impact assessment.
def analyze_emotion
  cached_result("emotion_analysis") do
    prompt = build_emotion_analysis_prompt

    response = @llm_service.analyze(prompt, {
      json_response: true,
      temperature: 0.5
    })

    parse_json_response(response) || default_emotion_analysis
  end
end

# Local stylistic metrics (variety, structure, transitions, voice, formality).
def analyze_style
  cached_result("style_analysis") do
    {
      sentence_variety: analyze_sentence_variety,
      paragraph_structure: analyze_paragraph_structure,
      transition_usage: analyze_transitions,
      active_passive_ratio: calculate_active_passive_ratio,
      formality_level: detect_formality_level
    }
  end
end

# LLM-backed coherence / logical-flow assessment.
def analyze_coherence
  cached_result("coherence_analysis") do
    prompt = build_coherence_analysis_prompt

    response = @llm_service.analyze(prompt, {
      json_response: true,
      temperature: 0.3
    })

    parse_json_response(response) || default_coherence_analysis
  end
end
-
-
# Validation checks

# Flag a tone mismatch against the brand's primary tone (default
# "professional"); a compatible but low-confidence tone gets a suggestion.
def check_tone_compliance
  tone_analysis = analyze_aspect(:tone)
  expected_tone = brand.latest_analysis&.voice_attributes&.dig("tone", "primary") || "professional"

  detected_tone = tone_analysis[:primary_tone]
  confidence = tone_analysis[:confidence]

  if !tone_compatible?(detected_tone, expected_tone)
    add_violation(
      type: "tone_mismatch",
      severity: confidence > 0.8 ? "high" : "medium",
      message: "Content tone '#{detected_tone}' doesn't match brand tone '#{expected_tone}'",
      details: {
        expected: expected_tone,
        detected: detected_tone,
        confidence: confidence,
        secondary_tones: tone_analysis[:secondary_tones]
      }
    )
  elsif confidence < 0.6
    add_suggestion(
      type: "tone_clarity",
      message: "Consider strengthening the #{expected_tone} tone",
      details: {
        current_confidence: confidence,
        detected_tones: tone_analysis[:all_tones]
      }
    )
  end
end

# Violation when the overall sentiment score falls outside the band derived
# from the brand's sentiment profile (default 0.7 positive).
def check_sentiment_alignment
  sentiment = analyze_aspect(:sentiment)
  brand_sentiment = brand.latest_analysis&.sentiment_profile || { "positive" => 0.7 }

  sentiment_score = sentiment[:overall_score]
  expected_range = determine_expected_sentiment_range(brand_sentiment)

  if !sentiment_score.between?(expected_range[:min], expected_range[:max])
    add_violation(
      type: "sentiment_misalignment",
      severity: "medium",
      message: "Content sentiment (#{sentiment_score.round(2)}) outside brand range (#{expected_range[:min]}-#{expected_range[:max]})",
      details: {
        current_sentiment: sentiment_score,
        expected_range: expected_range,
        sentiment_breakdown: sentiment[:breakdown]
      }
    )
  end
end

# Violation when the readability grade is more than 2 grades off target
# (high severity beyond 4); a 1-2 grade gap yields a suggestion.
def check_readability_standards
  readability = analyze_aspect(:readability)
  target_grade = brand.brand_guidelines.by_category("readability").first&.metadata&.dig("target_grade") || 8

  current_grade = readability[:readability_grade]

  if (current_grade - target_grade).abs > 2
    severity = (current_grade - target_grade).abs > 4 ? "high" : "medium"

    add_violation(
      type: "readability_mismatch",
      severity: severity,
      message: "Readability grade #{current_grade} significantly differs from target #{target_grade}",
      details: {
        current_grade: current_grade,
        target_grade: target_grade,
        metrics: readability
      }
    )
  elsif (current_grade - target_grade).abs > 1
    add_suggestion(
      type: "readability_adjustment",
      message: "Consider adjusting readability closer to grade #{target_grade}",
      details: {
        current_grade: current_grade,
        suggestions: suggest_readability_improvements(readability, target_grade)
      }
    )
  end
end

# Violation below 50% voice alignment; 50-70% yields a high-priority
# suggestion to strengthen brand voice elements.
def check_brand_voice_alignment
  alignment = analyze_aspect(:brand_alignment)
  alignment_score = alignment[:overall_score] || 0

  if alignment_score < 0.5
    add_violation(
      type: "brand_voice_misalignment",
      severity: "high",
      message: "Content doesn't align well with brand voice (#{(alignment_score * 100).round}% match)",
      details: {
        alignment_score: alignment_score,
        missing_elements: alignment[:missing_elements],
        conflicting_elements: alignment[:conflicting_elements]
      }
    )
  elsif alignment_score < 0.7
    add_suggestion(
      type: "brand_voice_enhancement",
      message: "Strengthen brand voice elements",
      details: {
        current_score: alignment_score,
        improvement_areas: alignment[:improvement_suggestions]
      },
      priority: "high"
    )
  end
end

# Violation when more than half of the brand's key messages are missing;
# any smaller gap yields a suggestion listing up to three of them.
def check_messaging_consistency
  brand_messages = extract_brand_messages
  alignment = analyze_aspect(:brand_alignment)

  missing_messages = alignment[:missing_key_messages] || []

  if missing_messages.length > brand_messages.length * 0.5
    add_violation(
      type: "key_message_absence",
      severity: "medium",
      message: "Missing #{missing_messages.length} key brand messages",
      details: {
        missing_messages: missing_messages,
        total_expected: brand_messages.length
      }
    )
  elsif missing_messages.any?
    add_suggestion(
      type: "message_incorporation",
      message: "Consider incorporating these key messages",
      details: {
        missing_messages: missing_messages.first(3)
      }
    )
  end
end

# Violation when fewer than 30% of target emotions are evoked; 30-60%
# yields a suggestion with concrete techniques.
def check_emotional_resonance
  emotion = analyze_aspect(:emotion)
  target_emotions = brand.latest_analysis&.emotional_targets || ["trust", "confidence"]

  detected_emotions = emotion[:primary_emotions] || []
  emotion_match = (detected_emotions & target_emotions).length.to_f / target_emotions.length

  if emotion_match < 0.3
    add_violation(
      type: "emotional_disconnect",
      severity: "medium",
      message: "Content doesn't evoke target brand emotions",
      details: {
        target_emotions: target_emotions,
        detected_emotions: detected_emotions,
        match_percentage: (emotion_match * 100).round
      }
    )
  elsif emotion_match < 0.6
    add_suggestion(
      type: "emotional_enhancement",
      message: "Strengthen emotional connection with brand values",
      details: {
        current_emotions: detected_emotions,
        target_emotions: target_emotions,
        suggestions: suggest_emotional_improvements(emotion, target_emotions)
      }
    )
  end
end

# Suggest more sentence variety when the variety score is low, and flag a
# formality mismatch against the style guidelines (default "moderate").
def check_style_consistency
  style = analyze_aspect(:style)
  guidelines = brand.brand_guidelines.by_category("style")

  # Check sentence variety
  if style[:sentence_variety][:score] < 0.4
    add_suggestion(
      type: "sentence_variety",
      message: "Vary sentence structure for better flow",
      details: {
        current_variety: style[:sentence_variety],
        suggestions: ["Mix short and long sentences", "Use different sentence openings"]
      }
    )
  end

  # Check formality level
  expected_formality = guidelines.find { |g| g.metadata&.dig("formality_level") }&.metadata&.dig("formality_level") || "moderate"
  if !formality_matches?(style[:formality_level], expected_formality)
    add_violation(
      type: "formality_mismatch",
      severity: "low",
      message: "Formality level '#{style[:formality_level]}' doesn't match expected '#{expected_formality}'",
      details: {
        current: style[:formality_level],
        expected: expected_formality
      }
    )
  end
end
-
-
# Helper methods

# LLM prompt asking for a structured tone assessment of the content.
# (Heredoc bodies are runtime strings sent to the LLM — do not edit casually.)
def build_tone_analysis_prompt
  <<~PROMPT
    Analyze the tone of the following content and provide a detailed assessment.
    Content:
    #{content}
    Provide analysis in this JSON structure:
    {
    "primary_tone": "professional|casual|formal|friendly|authoritative|conversational|etc",
    "secondary_tones": ["tone1", "tone2"],
    "confidence": 0.0-1.0,
    "all_tones": {
    "tone_name": confidence_score
    },
    "tone_consistency": 0.0-1.0,
    "tone_shifts": [
    {
    "position": "paragraph/sentence reference",
    "from_tone": "tone1",
    "to_tone": "tone2"
    }
    ]
    }
  PROMPT
end

# LLM prompt asking for a structured sentiment breakdown of the content.
def build_sentiment_analysis_prompt
  <<~PROMPT
    Analyze the sentiment of the following content.
    Content:
    #{content}
    Provide analysis in this JSON structure:
    {
    "overall_score": -1.0 to 1.0,
    "breakdown": {
    "positive": 0.0-1.0,
    "negative": 0.0-1.0,
    "neutral": 0.0-1.0
    },
    "sentiment_flow": [
    {
    "section": "identifier",
    "score": -1.0 to 1.0
    }
    ],
    "emotional_words": {
    "positive": ["word1", "word2"],
    "negative": ["word1", "word2"]
    }
    }
  PROMPT
end

# LLM prompt comparing the content against the brand's voice attributes
# and key messages (both serialized as JSON into the prompt).
def build_brand_alignment_prompt
  brand_voice = brand.brand_voice_attributes
  key_messages = brand.messaging_framework&.key_messages || {}

  <<~PROMPT
    Analyze how well the content aligns with the brand voice and messaging.
    Content:
    #{content}
    Brand Voice Attributes:
    #{brand_voice.to_json}
    Key Messages:
    #{key_messages.to_json}
    Provide analysis in this JSON structure:
    {
    "overall_score": 0.0-1.0,
    "voice_alignment": {
    "matching_attributes": ["attribute1", "attribute2"],
    "missing_attributes": ["attribute1", "attribute2"],
    "conflicting_attributes": ["attribute1", "attribute2"]
    },
    "message_alignment": {
    "incorporated_messages": ["message1", "message2"],
    "missing_key_messages": ["message1", "message2"],
    "message_clarity": 0.0-1.0
    },
    "improvement_suggestions": [
    {
    "area": "voice|messaging|tone",
    "suggestion": "specific improvement",
    "priority": "high|medium|low"
    }
    ],
    "missing_elements": ["element1", "element2"],
    "conflicting_elements": ["element1", "element2"]
    }
  PROMPT
end

# LLM prompt asking for a structured emotional-impact analysis.
def build_emotion_analysis_prompt
  <<~PROMPT
    Analyze the emotional content and impact of the following text.
    Content:
    #{content}
    Provide analysis in this JSON structure:
    {
    "primary_emotions": ["emotion1", "emotion2", "emotion3"],
    "emotion_intensity": {
    "emotion_name": 0.0-1.0
    },
    "emotional_arc": [
    {
    "section": "beginning|middle|end",
    "dominant_emotion": "emotion",
    "intensity": 0.0-1.0
    }
    ],
    "emotional_triggers": [
    {
    "phrase": "triggering phrase",
    "emotion": "triggered emotion",
    "strength": 0.0-1.0
    }
    ]
    }
  PROMPT
end

# LLM prompt asking for a structured coherence / logical-flow analysis.
def build_coherence_analysis_prompt
  <<~PROMPT
    Analyze the coherence and logical flow of the following content.
    Content:
    #{content}
    Provide analysis in this JSON structure:
    {
    "overall_coherence": 0.0-1.0,
    "logical_flow": 0.0-1.0,
    "topic_consistency": 0.0-1.0,
    "transition_quality": 0.0-1.0,
    "issues": [
    {
    "type": "logical_gap|topic_shift|unclear_transition",
    "location": "paragraph/sentence reference",
    "severity": "high|medium|low",
    "suggestion": "how to fix"
    }
    ],
    "strengths": ["strength1", "strength2"]
    }
  PROMPT
end
-
-
# Safely coerce an LLM response into a symbol-keyed Hash.
# Strings are JSON-parsed; non-string responses pass through untouched.
# Returns nil for nil/empty input or unparseable JSON (logged, not raised).
def parse_json_response(response)
  return nil if response.nil? || response.empty?
  return response unless response.is_a?(String)

  JSON.parse(response, symbolize_names: true)
rescue JSON::ParserError => e
  Rails.logger.error "Failed to parse LLM JSON response: #{e.message}"
  nil
end
-
-
# Flesch Reading Ease score (higher = easier to read); 0 for empty content.
def calculate_flesch_kincaid
  sentences = content.split(/[.!?]+/).reject(&:blank?)
  words = tokenize_content
  syllables = words.sum { |word| count_syllables(word) }

  return 0 if sentences.empty? || words.empty?

  score = 206.835 - 1.015 * (words.length.to_f / sentences.length) - 84.6 * (syllables.to_f / words.length)
  score.round(1)
end

# Gunning fog index (estimated years of education needed); 0 for empty content.
# A "complex" word is one with 3+ syllables.
def calculate_gunning_fog
  sentences = content.split(/[.!?]+/).reject(&:blank?)
  words = tokenize_content
  complex_words = words.count { |word| count_syllables(word) >= 3 }

  return 0 if sentences.empty? || words.empty?

  score = 0.4 * ((words.length.to_f / sentences.length) + 100 * (complex_words.to_f / words.length))
  score.round(1)
end

# Mean words per sentence; 0 for empty content.
def calculate_average_sentence_length
  sentences = content.split(/[.!?]+/).reject(&:blank?)
  words = tokenize_content

  return 0 if sentences.empty?

  (words.length.to_f / sentences.length).round(1)
end

# Mean characters per token; 0 for empty content.
def calculate_average_word_length
  words = tokenize_content
  return 0 if words.empty?

  total_length = words.sum(&:length)
  (total_length.to_f / words.length).round(1)
end

# Percentage of tokens with 3+ syllables; 0 for empty content.
def calculate_complex_word_percentage
  words = tokenize_content
  complex_words = words.count { |word| count_syllables(word) >= 3 }

  return 0 if words.empty?

  ((complex_words.to_f / words.length) * 100).round(1)
end
-
-
# Map the Flesch Reading Ease score to an approximate US grade level.
# Flesch scores are floats, so the bands use exclusive upper bounds —
# the previous integer ranges (e.g. 60..69 then 70..79) silently dropped
# fractional scores such as 69.5 into the `else` branch (grade 12).
# Scores above 100 (possible for very simple text) now map to the easiest
# grade instead of falling through to the default.
def determine_readability_grade
  flesch_score = calculate_flesch_kincaid

  case flesch_score
  when 90.. then 5       # very easy
  when 80...90 then 6
  when 70...80 then 7
  when 60...70 then 8
  when 50...60 then 10
  when 30...50 then 13
  when 0...30 then 16    # very difficult
  else 12                # negative / degenerate scores
  end
end
-
-
# Lowercased word tokens of the content: punctuation becomes spaces and
# single-character tokens are dropped.
def tokenize_content
  content.downcase.gsub(/[^\w\s]/, ' ').split.reject { |w| w.length < 2 }
end
-
-
# Heuristic syllable counter: counts maximal vowel runs (y counts as a
# vowel), discounts a trailing silent "e", and never returns less than 1.
# Words of three letters or fewer are treated as one syllable.
def count_syllables(word)
  return 1 if word.length <= 3

  normalized = word.downcase
  vowel_groups = normalized.scan(/[aeiouy]+/).length
  # Adjust for silent e
  vowel_groups -= 1 if normalized.end_with?('e') && vowel_groups > 1

  [vowel_groups, 1].max
end
-
-
# Sentence-length variety: the coefficient of variation of per-sentence
# word counts, capped at 1.0 and bucketed into a qualitative label.
def analyze_sentence_variety
  sentences = content.split(/[.!?]+/).reject(&:blank?)
  return { score: 0, variety: "none" } if sentences.empty?

  lengths = sentences.map { |s| s.split.length }

  # Calculate standard deviation
  mean = lengths.sum.to_f / lengths.length
  variance = lengths.sum { |l| (l - mean) ** 2 } / lengths.length
  std_dev = Math.sqrt(variance)

  # Normalize to 0-1 score
  variety_score = [std_dev / mean, 1.0].min

  {
    score: variety_score.round(2),
    variety: case variety_score
             when 0..0.2 then "very_low"
             when 0.2..0.4 then "low"
             when 0.4..0.6 then "moderate"
             when 0.6..0.8 then "good"
             else "excellent"
             end,
    stats: {
      mean_length: mean.round(1),
      std_deviation: std_dev.round(1),
      min_length: lengths.min,
      max_length: lengths.max
    }
  }
end
-
-
# Paragraph count and length statistics for the content.
# BUGFIX: with no paragraphs the average was 0 / 0.0 => NaN; empty content
# now returns a neutral result instead.
def analyze_paragraph_structure
  paragraphs = content.split(/\n\n+/).reject(&:blank?)
  return { count: 0, average_length: 0.0, consistency: 1.0 } if paragraphs.empty?

  {
    count: paragraphs.length,
    average_length: paragraphs.sum { |p| p.split.length } / paragraphs.length.to_f,
    consistency: calculate_paragraph_consistency(paragraphs)
  }
end
-
-
# Count sentences containing a known transition word and grade usage
# ("good" above a 20% transition rate).
# BUGFIX: empty content previously produced (0.0 / 0) => NaN, and
# NaN.round(1) raises FloatDomainError; guard the empty case.
def analyze_transitions
  transition_words = %w[
    however therefore furthermore moreover consequently
    additionally nevertheless nonetheless meanwhile
    alternatively subsequently thus hence accordingly
  ]

  sentences = content.split(/[.!?]+/)
  return { count: 0, percentage: 0.0, quality: "needs_improvement" } if sentences.empty?

  transitions_used = sentences.count do |sentence|
    sentence_lower = sentence.downcase
    transition_words.any? { |t| sentence_lower.include?(t) }
  end

  {
    count: transitions_used,
    percentage: (transitions_used.to_f / sentences.length * 100).round(1),
    quality: transitions_used > sentences.length * 0.2 ? "good" : "needs_improvement"
  }
end
-
-
# Simplified active/passive voice tally: a sentence counts as passive when
# it matches "be-verb + word ending in 'ed'". The ratio divisor is floored
# at 1 so all-active content never divides by zero.
def calculate_active_passive_ratio
  passive_pattern = /\b(was|were|been|being|is|are|am)\s+\w+ed\b/
  all_sentences = content.split(/[.!?]+/)

  passive_total = all_sentences.count { |sentence| sentence.match?(passive_pattern) }
  active_total = all_sentences.length - passive_total

  {
    active: active_total,
    passive: passive_total,
    ratio: active_total.to_f / [passive_total, 1].max
  }
end
-
-
# Classify the content's formality by weighing formal connectives against
# informal markers and contractions. A 2x margin either way is decisive;
# a simple majority is "moderate"; a tie is "neutral".
def detect_formality_level
  formal_indicators = %w[therefore furthermore consequently thus hence moreover]
  informal_indicators = %w[gonna wanna gotta kinda sorta yeah yep nope]
  contractions = /\b\w+'(ll|ve|re|d|s|t)\b/

  content_lower = content.downcase

  formal_score = formal_indicators.count { |word| content_lower.include?(word) }
  informal_score = informal_indicators.count { |word| content_lower.include?(word) } +
                   content.scan(contractions).length

  return "formal" if formal_score > informal_score * 2
  return "informal" if informal_score > formal_score * 2
  return "moderate_formal" if formal_score > informal_score
  return "moderate_informal" if informal_score > formal_score

  "neutral"
end
-
-
# A detected tone passes when it belongs to the expected tone's family;
# expected tones without a known family only match themselves.
def tone_compatible?(detected, expected)
  tone_families = {
    "professional" => ["professional", "formal", "authoritative"],
    "casual" => ["casual", "conversational", "friendly"],
    "friendly" => ["friendly", "casual", "conversational", "warm"],
    "formal" => ["formal", "professional", "authoritative"],
    "authoritative" => ["authoritative", "professional", "formal", "expert"]
  }

  tone_families.fetch(expected, [expected]).include?(detected)
end
-
-
# Acceptable sentiment window: the brand's positive ratio +/- 0.2, with the
# upper bound capped at 1.0. Defaults to a 0.7-positive profile when the
# brand has no recorded ratio.
def determine_expected_sentiment_range(brand_sentiment)
  center = brand_sentiment["positive"] || 0.7

  { min: center - 0.2, max: [center + 0.2, 1.0].min }
end
-
-
# Advice for moving content toward the target grade: simplify when the
# current grade is above target, enrich when at or below it.
def suggest_readability_improvements(readability, target_grade)
  if readability[:readability_grade] > target_grade
    [
      "Simplify complex sentences",
      "Use shorter words where possible",
      "Break up long paragraphs"
    ]
  else
    [
      "Add more descriptive language",
      "Use more varied vocabulary",
      "Combine short, choppy sentences"
    ]
  end
end
-
-
# Collect brand keywords from the messaging framework and the latest
# analysis, normalized to lowercase.
# BUGFIX: deduplicate AFTER downcasing — previously `uniq` ran first, so
# case variants like "Quality"/"quality" both survived and produced
# duplicate entries in the keyword-density analysis.
def extract_brand_keywords
  keywords = []

  # From messaging framework
  if brand.messaging_framework
    keywords += brand.messaging_framework.key_messages.values.flatten
    keywords += brand.messaging_framework.value_propositions.values.flatten
  end

  # From brand analysis
  if brand.latest_analysis
    keywords += brand.latest_analysis.keywords || []
  end

  keywords.map(&:downcase).uniq
end
-
-
# Key messages plus value propositions from the messaging framework,
# deduplicated; empty when the brand has no framework.
def extract_brand_messages
  framework = brand.messaging_framework
  return [] unless framework

  combined = framework.key_messages.values.flatten +
             framework.value_propositions.values.flatten
  combined.uniq
end
-
-
# Optimal density window for a keyword: primary (key-message) keywords may
# appear more frequently than secondary ones.
# BUGFIX: keywords arrive lowercased (extract_brand_keywords downcases),
# while key-message strings keep their original casing, so the previous
# exact `include?` never recognized capitalized messages as primary —
# compare case-insensitively instead.
def determine_optimal_density(keyword)
  primary_keywords = brand.messaging_framework&.key_messages&.values&.flatten || []
  primary = primary_keywords.any? { |k| k.casecmp?(keyword) }

  primary ? { min: 1.0, max: 3.0 } : { min: 0.5, max: 2.0 }
end
-
-
# Concrete writing techniques for each target emotion the content is not
# yet evoking; target emotions without a known technique are skipped.
def suggest_emotional_improvements(current_emotion, target_emotions)
  emotion_techniques = {
    "trust" => "Include testimonials, credentials, or guarantees",
    "excitement" => "Use dynamic language and emphasize benefits",
    "confidence" => "Highlight expertise and success stories",
    "warmth" => "Use personal anecdotes and inclusive language",
    "innovation" => "Emphasize cutting-edge features and forward-thinking"
  }

  absent = target_emotions - current_emotion[:primary_emotions]
  absent.map { |emotion| emotion_techniques[emotion] }.compact
end
-
-
# True when the detected formality falls within the group acceptable for
# the expected level; unknown expectations match only themselves.
def formality_matches?(detected, expected)
  acceptable = {
    "formal" => ["formal", "moderate_formal"],
    "informal" => ["informal", "moderate_informal"],
    "neutral" => ["neutral", "moderate_formal", "moderate_informal"]
  }.fetch(expected, [expected])

  acceptable.include?(detected)
end
-
-
# Consistency of paragraph lengths in [0, 1]: 1.0 means uniform lengths
# (coefficient of variation 0), lower values mean greater spread.
# Zero or one paragraph is trivially consistent.
def calculate_paragraph_consistency(paragraphs)
  return 1.0 if paragraphs.length <= 1

  lengths = paragraphs.map { |p| p.split.length }
  mean = lengths.sum.to_f / lengths.length
  variance = lengths.sum { |l| (l - mean) ** 2 } / lengths.length
  spread = [Math.sqrt(variance) / mean, 1.0].min

  (1.0 - spread).round(2)
end
-
-
# Default analysis results for fallback — neutral payloads returned when
# the LLM response is missing or unparseable.

def default_tone_analysis
  { primary_tone: "neutral",
    secondary_tones: [],
    confidence: 0.5,
    all_tones: { "neutral" => 0.5 },
    tone_consistency: 0.5,
    tone_shifts: [] }
end

def default_sentiment_analysis
  { overall_score: 0.0,
    breakdown: { positive: 0.33, negative: 0.33, neutral: 0.34 },
    sentiment_flow: [],
    emotional_words: { positive: [], negative: [] } }
end

def default_brand_alignment
  { overall_score: 0.5,
    voice_alignment: { matching_attributes: [],
                       missing_attributes: [],
                       conflicting_attributes: [] },
    message_alignment: { incorporated_messages: [],
                         missing_key_messages: [],
                         message_clarity: 0.5 },
    improvement_suggestions: [],
    missing_elements: [],
    conflicting_elements: [] }
end

def default_emotion_analysis
  { primary_emotions: ["neutral"],
    emotion_intensity: { "neutral" => 0.5 },
    emotional_arc: [],
    emotional_triggers: [] }
end

def default_coherence_analysis
  { overall_coherence: 0.5,
    logical_flow: 0.5,
    topic_consistency: 0.5,
    transition_quality: 0.5,
    issues: [],
    strengths: [] }
end
-
end
-
end
-
end
-
module Branding
  module Compliance
    # Compiles and evaluates brand compliance rules drawn from brand
    # guidelines, global defaults and (when applicable) industry rules,
    # keeping compiled rules cached per category.
    class RuleEngine
      attr_reader :brand, :rules_cache

      # Numeric rule priorities; rules are sorted by descending priority
      # elsewhere in this class.
      RULE_PRIORITIES = {
        mandatory: 100,
        critical: 90,
        high: 70,
        medium: 50,
        low: 30,
        optional: 10
      }.freeze

      # Eagerly loads (or restores from cache) the brand's compiled rules.
      def initialize(brand)
        @brand = brand
        @rules_cache = {}
        load_rules
      end
-
-
# Evaluate all context-applicable rules against the content.
# Returns a hash with :passed/:failed/:warnings result lists, a :score,
# and :rule_conflicts detected among the failures.
# NOTE(review): filter_rules_by_context, evaluate_rule, calculate_score and
# detect_conflicts are defined outside this excerpt — confirm contracts.
def evaluate(content, context = {})
  results = {
    passed: [],
    failed: [],
    warnings: [],
    score: 0.0
  }

  # Get applicable rules based on context
  applicable_rules = filter_rules_by_context(context)

  # Evaluate rules in priority order
  applicable_rules.each do |rule|
    result = evaluate_rule(rule, content, context)

    case result[:status]
    when :passed
      results[:passed] << result
    when :failed
      results[:failed] << result
    when :warning
      results[:warnings] << result
    end
  end

  # Calculate compliance score
  results[:score] = calculate_score(results, applicable_rules)
  results[:rule_conflicts] = detect_conflicts(results[:failed])

  results
end
-
-
def get_rules_for_category(category)
-
@rules_cache[category] || []
-
end
-
-
def add_dynamic_rule(rule_definition)
-
rule = build_rule(rule_definition)
-
category = rule[:category] || "dynamic"
-
-
@rules_cache[category] ||= []
-
@rules_cache[category] << rule
-
-
# Sort by priority
-
@rules_cache[category].sort_by! { |r| -r[:priority] }
-
end
-
-
def build_rule(rule_definition)
-
{
-
id: rule_definition[:id] || "dynamic_#{SecureRandom.hex(8)}",
-
source: "dynamic",
-
category: rule_definition[:category] || "general",
-
type: rule_definition[:type],
-
content: rule_definition[:content],
-
priority: rule_definition[:priority] || 50,
-
mandatory: rule_definition[:mandatory] || false,
-
metadata: rule_definition[:metadata] || {},
-
evaluator: rule_definition[:evaluator] || ->(content, _context) { true }
-
}
-
end
-
-
private
-
-
def load_rules
-
# Try to load from cache first
-
cached_rules = Rails.cache.read("compiled_rules:#{brand.id}")
-
-
if cached_rules.present?
-
# Restore cached rules and regenerate evaluators
-
@rules_cache = cached_rules
-
restore_evaluators
-
else
-
# Load fresh rules
-
load_brand_guidelines
-
load_global_rules
-
load_industry_rules if brand.industry.present?
-
cache_compiled_rules
-
end
-
end
-
-
def load_brand_guidelines
-
brand.brand_guidelines.active.each do |guideline|
-
rule = {
-
id: "brand_#{guideline.id}",
-
source: "brand_guideline",
-
category: guideline.category,
-
type: guideline.rule_type,
-
content: guideline.rule_content,
-
priority: calculate_priority(guideline),
-
mandatory: guideline.mandatory?,
-
metadata: guideline.metadata || {},
-
evaluator: build_evaluator(guideline)
-
}
-
-
category = guideline.category || "general"
-
@rules_cache[category] ||= []
-
@rules_cache[category] << rule
-
end
-
end
-
-
def load_global_rules
-
# Load system-wide compliance rules
-
global_rules = [
-
{
-
id: "global_profanity",
-
category: "content",
-
type: "must_not",
-
content: "Content must not contain profanity",
-
priority: RULE_PRIORITIES[:critical],
-
mandatory: true,
-
evaluator: ->(content, _context) { !contains_profanity?(content) }
-
},
-
{
-
id: "global_legal",
-
category: "legal",
-
type: "must",
-
content: "Content must include required legal disclaimers",
-
priority: RULE_PRIORITIES[:high],
-
mandatory: true,
-
evaluator: ->(content, context) { check_legal_requirements(content, context) }
-
},
-
{
-
id: "global_accessibility",
-
category: "accessibility",
-
type: "should",
-
content: "Content should follow accessibility guidelines",
-
priority: RULE_PRIORITIES[:medium],
-
mandatory: false,
-
evaluator: ->(content, context) { check_accessibility(content, context) }
-
}
-
]
-
-
global_rules.each do |rule|
-
category = rule[:category]
-
@rules_cache[category] ||= []
-
@rules_cache[category] << rule
-
end
-
end
-
-
def load_industry_rules
-
# Load industry-specific compliance rules without caching the Proc objects
-
industry_rules = case brand.industry
-
when "healthcare"
-
load_healthcare_rules
-
when "finance"
-
load_finance_rules
-
when "technology"
-
load_technology_rules
-
else
-
[]
-
end
-
-
industry_rules.each do |rule|
-
category = rule[:category]
-
@rules_cache[category] ||= []
-
@rules_cache[category] << rule
-
end
-
end
-
-
def build_evaluator(guideline)
-
case guideline.rule_type
-
when "must", "do"
-
->(content, _context) { content_matches_positive_rule?(content, guideline) }
-
when "must_not", "dont", "avoid"
-
->(content, _context) { !content_matches_negative_rule?(content, guideline) }
-
when "should", "prefer"
-
->(content, _context) { content_follows_suggestion?(content, guideline) }
-
else
-
->(content, _context) { true }
-
end
-
end
-
-
def evaluate_rule(rule, content, context)
-
begin
-
passed = rule[:evaluator].call(content, context)
-
-
{
-
rule_id: rule[:id],
-
status: determine_status(passed, rule),
-
message: build_message(passed, rule),
-
severity: determine_severity(rule),
-
details: {
-
rule_type: rule[:type],
-
category: rule[:category],
-
mandatory: rule[:mandatory]
-
}
-
}
-
rescue StandardError => e
-
Rails.logger.error "Rule evaluation error: #{e.message}"
-
{
-
rule_id: rule[:id],
-
status: :error,
-
message: "Error evaluating rule: #{rule[:content]}",
-
severity: "low",
-
error: e.message
-
}
-
end
-
end
-
-
def determine_status(passed, rule)
-
if passed
-
:passed
-
elsif rule[:mandatory]
-
:failed
-
else
-
:warning
-
end
-
end
-
-
def determine_severity(rule)
-
if rule[:mandatory]
-
priority_to_severity(rule[:priority])
-
else
-
"low"
-
end
-
end
-
-
def priority_to_severity(priority)
-
case priority
-
when 90..100 then "critical"
-
when 70..89 then "high"
-
when 50..69 then "medium"
-
else "low"
-
end
-
end
-
-
def calculate_priority(guideline)
-
base_priority = guideline.priority * 10
-
-
# Boost priority for mandatory rules
-
base_priority += 20 if guideline.mandatory?
-
-
# Cap at maximum
-
[base_priority, 100].min
-
end
-
-
def filter_rules_by_context(context)
-
all_rules = @rules_cache.values.flatten
-
-
# Filter based on content type
-
if context[:content_type].present?
-
all_rules = all_rules.select do |rule|
-
rule[:metadata].blank? ||
-
rule[:metadata][:content_types].blank? ||
-
rule[:metadata][:content_types].include?(context[:content_type])
-
end
-
end
-
-
# Filter based on channel
-
if context[:channel].present?
-
all_rules = all_rules.select do |rule|
-
rule[:metadata].blank? ||
-
rule[:metadata][:channels].blank? ||
-
rule[:metadata][:channels].include?(context[:channel])
-
end
-
end
-
-
# Sort by priority
-
all_rules.sort_by { |rule| -rule[:priority] }
-
end
-
-
def calculate_score(results, total_rules)
-
return 1.0 if total_rules.empty?
-
-
# Weight rules by priority
-
total_weight = 0.0
-
passed_weight = 0.0
-
-
results[:passed].each do |result|
-
rule = find_rule(result[:rule_id])
-
weight = rule[:priority] / 100.0
-
total_weight += weight
-
passed_weight += weight
-
end
-
-
results[:failed].each do |result|
-
rule = find_rule(result[:rule_id])
-
weight = rule[:priority] / 100.0
-
total_weight += weight
-
end
-
-
results[:warnings].each do |result|
-
rule = find_rule(result[:rule_id])
-
weight = rule[:priority] / 100.0
-
total_weight += weight
-
passed_weight += weight * 0.5 # Partial credit for warnings
-
end
-
-
return 0.0 if total_weight == 0
-
-
(passed_weight / total_weight).round(3)
-
end
-
-
def detect_conflicts(failed_results)
-
conflicts = []
-
-
failed_results.each_with_index do |result1, i|
-
failed_results[(i+1)..-1].each do |result2|
-
if rules_conflict?(result1, result2)
-
conflicts << {
-
rule1: result1[:rule_id],
-
rule2: result2[:rule_id],
-
type: "contradiction",
-
resolution: suggest_resolution(result1, result2)
-
}
-
end
-
end
-
end
-
-
conflicts
-
end
-
-
def rules_conflict?(result1, result2)
-
rule1 = find_rule(result1[:rule_id])
-
rule2 = find_rule(result2[:rule_id])
-
-
return false unless rule1 && rule2
-
-
# Check for contradictory rules
-
(rule1[:type] == "must" && rule2[:type] == "dont") ||
-
(rule1[:type] == "dont" && rule2[:type] == "must") ||
-
(rule1[:type] == "must" && rule2[:type] == "must_not") ||
-
(rule1[:type] == "must_not" && rule2[:type] == "must")
-
end
-
-
def suggest_resolution(result1, result2)
-
rule1 = find_rule(result1[:rule_id])
-
rule2 = find_rule(result2[:rule_id])
-
-
# Higher priority rule takes precedence
-
if rule1[:priority] > rule2[:priority]
-
"Follow rule #{rule1[:id]} (higher priority)"
-
elsif rule2[:priority] > rule1[:priority]
-
"Follow rule #{rule2[:id]} (higher priority)"
-
else
-
"Review both rules and update priorities"
-
end
-
end
-
-
def find_rule(rule_id)
-
@rules_cache.values.flatten.find { |rule| rule[:id] == rule_id }
-
end
-
-
def cache_compiled_rules
-
# Create a serializable version of rules cache without Proc evaluators
-
serializable_cache = {}
-
@rules_cache.each do |category, rules|
-
serializable_cache[category] = rules.map do |rule|
-
rule.except(:evaluator) # Remove non-serializable Proc evaluators
-
end
-
end
-
-
Rails.cache.write(
-
"compiled_rules:#{brand.id}",
-
serializable_cache,
-
expires_in: 1.hour
-
)
-
end
-
-
def restore_evaluators
-
@rules_cache.each do |category, rules|
-
rules.each do |rule|
-
next if rule[:evaluator].present? # Skip if evaluator already exists
-
-
# Regenerate evaluator based on rule type and source
-
rule[:evaluator] = case rule[:source]
-
when "brand_guideline"
-
build_evaluator_for_cached_rule(rule)
-
else
-
build_global_evaluator(rule)
-
end
-
end
-
end
-
end
-
-
def build_evaluator_for_cached_rule(rule)
-
case rule[:type]
-
when "must", "do"
-
->(content, _context) { content_matches_positive_rule_cached?(content, rule) }
-
when "must_not", "dont", "avoid"
-
->(content, _context) { !content_matches_negative_rule_cached?(content, rule) }
-
when "should", "prefer"
-
->(content, _context) { content_follows_suggestion_cached?(content, rule) }
-
else
-
->(content, _context) { true }
-
end
-
end
-
-
def build_global_evaluator(rule)
-
case rule[:id]
-
when "global_profanity"
-
->(content, _context) { !contains_profanity?(content) }
-
when "global_legal"
-
->(content, context) { check_legal_requirements(content, context) }
-
when "global_accessibility"
-
->(content, context) { check_accessibility(content, context) }
-
when "healthcare_hipaa"
-
->(content, _context) { !contains_phi?(content) }
-
when "finance_disclaimer"
-
->(content, context) { contains_required_disclaimer?(content, context) }
-
when "tech_accuracy"
-
->(content, _context) { validate_technical_accuracy(content) }
-
else
-
->(content, _context) { true }
-
end
-
end
-
-
# Helper methods for rule evaluation
-
def content_matches_positive_rule?(content, guideline)
-
keywords = extract_keywords(guideline.rule_content)
-
content_lower = content.downcase
-
-
keywords.any? { |keyword| content_lower.include?(keyword.downcase) }
-
end
-
-
def content_matches_negative_rule?(content, guideline)
-
keywords = extract_keywords(guideline.rule_content)
-
content_lower = content.downcase
-
-
keywords.any? { |keyword| content_lower.include?(keyword.downcase) }
-
end
-
-
def content_follows_suggestion?(content, guideline)
-
# More lenient check for suggestions
-
keywords = extract_keywords(guideline.rule_content)
-
content_lower = content.downcase
-
-
matching_keywords = keywords.count { |keyword| content_lower.include?(keyword.downcase) }
-
matching_keywords >= (keywords.length * 0.3) # 30% match threshold
-
end
-
-
def extract_keywords(text)
-
stop_words = %w[the a an and or but in on at to for of with as by that which who whom whose when where why how]
-
-
text.downcase
-
.split(/\W+/)
-
.reject { |word| stop_words.include?(word) || word.length < 3 }
-
.uniq
-
end
-
-
def contains_profanity?(content)
-
# Implement profanity detection
-
profanity_list = Rails.cache.fetch("profanity_list", expires_in: 1.day) do
-
# Load from database or external service
-
%w[badword1 badword2] # Placeholder
-
end
-
-
content_lower = content.downcase
-
profanity_list.any? { |word| content_lower.include?(word) }
-
end
-
-
def check_legal_requirements(content, context)
-
# Check for required legal disclaimers based on context
-
true # Placeholder
-
end
-
-
def check_accessibility(content, context)
-
# Check accessibility guidelines
-
true # Placeholder
-
end
-
-
def build_message(passed, rule)
-
if passed
-
"Complies with: #{rule[:content]}"
-
else
-
"Violates: #{rule[:content]}"
-
end
-
end
-
-
# Industry-specific rule loaders
-
def load_healthcare_rules
-
[
-
{
-
id: "healthcare_hipaa",
-
category: "legal",
-
type: "must_not",
-
content: "Must not disclose protected health information",
-
priority: RULE_PRIORITIES[:critical],
-
mandatory: true,
-
evaluator: ->(content, _context) { !contains_phi?(content) }
-
}
-
]
-
end
-
-
def load_finance_rules
-
[
-
{
-
id: "finance_disclaimer",
-
category: "legal",
-
type: "must",
-
content: "Must include investment risk disclaimer",
-
priority: RULE_PRIORITIES[:critical],
-
mandatory: true,
-
evaluator: ->(content, context) { contains_required_disclaimer?(content, context) }
-
}
-
]
-
end
-
-
def load_technology_rules
-
[
-
{
-
id: "tech_accuracy",
-
category: "content",
-
type: "must",
-
content: "Technical specifications must be accurate",
-
priority: RULE_PRIORITIES[:high],
-
mandatory: true,
-
evaluator: ->(content, _context) { validate_technical_accuracy(content) }
-
}
-
]
-
end
-
-
def contains_phi?(content)
-
# Check for protected health information patterns
-
false # Placeholder
-
end
-
-
def contains_required_disclaimer?(content, context)
-
# Check for required disclaimers
-
true # Placeholder
-
end
-
-
def validate_technical_accuracy(content)
-
# Validate technical claims
-
true # Placeholder
-
end
-
-
# Cached rule evaluation methods (work with rule hashes instead of guideline objects)
-
def content_matches_positive_rule_cached?(content, rule)
-
keywords = extract_keywords(rule[:content])
-
content_lower = content.downcase
-
-
keywords.any? { |keyword| content_lower.include?(keyword.downcase) }
-
end
-
-
def content_matches_negative_rule_cached?(content, rule)
-
keywords = extract_keywords(rule[:content])
-
content_lower = content.downcase
-
-
keywords.any? { |keyword| content_lower.include?(keyword.downcase) }
-
end
-
-
def content_follows_suggestion_cached?(content, rule)
-
# More lenient check for suggestions
-
keywords = extract_keywords(rule[:content])
-
content_lower = content.downcase
-
-
matching_keywords = keywords.count { |keyword| content_lower.include?(keyword.downcase) }
-
matching_keywords >= (keywords.length * 0.3) # 30% match threshold
-
end
-
end
-
end
-
end
-
module Branding
-
module Compliance
-
class SuggestionEngine
-
attr_reader :brand, :violations, :analysis_results
-
-
# @param brand [Brand] brand whose guidelines drive the suggestions
# @param violations [Array<Hash>] compliance violations to address
# @param analysis_results [Hash] optional NLP / visual analysis output
def initialize(brand, violations, analysis_results = {})
  @brand = brand
  @violations = violations
  @analysis_results = analysis_results
  @llm_service = LlmService.new
end
-
-
# Builds a prioritized, de-duplicated suggestion list covering every
# violation type plus proactive improvements, each decorated with
# implementation guidance.
def generate_suggestions
  by_type = group_violations

  collected = by_type.flat_map do |type, grouped|
    generate_suggestions_for_type(type, grouped)
  end
  collected += generate_proactive_suggestions

  add_implementation_guidance(prioritize_suggestions(collected))
end
-
-
# Produces a concrete content fix for a single violation, falling back to an
# AI-generated fix for unrecognized violation types.
def generate_fix(violation, content)
  handlers = {
    "banned_words" => :fix_banned_words,
    "tone_mismatch" => :fix_tone_mismatch,
    "missing_required_element" => :fix_missing_element,
    "readability_mismatch" => :fix_readability
  }

  handler = handlers[violation[:type]]
  handler ? send(handler, violation, content) : generate_ai_fix(violation, content)
end
-
-
# Asks the LLM for brand-appropriate alternatives to +phrase+ and parses the
# JSON response.
def suggest_alternatives(phrase, context = {})
  llm_options = { json_response: true, temperature: 0.7, max_tokens: 500 }
  raw = @llm_service.analyze(build_alternatives_prompt(phrase, context), llm_options)
  parse_alternatives_response(raw)
end
-
-
private
-
-
# Buckets the engine's violations by their :type key.
def group_violations
  violations.group_by { |violation| violation[:type] }
end

# Routes a violation group to its type-specific suggestion builder, using the
# generic builder for unknown types.
def generate_suggestions_for_type(type, type_violations)
  builders = {
    "tone_mismatch" => :generate_tone_suggestions,
    "banned_words" => :generate_vocabulary_suggestions,
    "missing_required_element" => :generate_element_suggestions,
    "readability_mismatch" => :generate_readability_suggestions,
    "brand_voice_misalignment" => :generate_voice_suggestions,
    "color_violation" => :generate_color_suggestions,
    "typography_violation" => :generate_typography_suggestions
  }

  send(builders.fetch(type, :generate_generic_suggestions), type_violations)
end
-
-
# Suggests a tone adjustment when all violations agree on a single expected
# tone; returns [] when the expected tones are mixed.
def generate_tone_suggestions(violations)
  wanted = violations.map { |v| v[:details][:expected] }.uniq
  found = violations.map { |v| v[:details][:detected] }.uniq
  return [] unless wanted.length == 1

  goal = wanted.first

  [
    {
      type: "tone_adjustment",
      priority: "high",
      title: "Align content tone with brand voice",
      description: "Adjust the overall tone to be more #{goal}",
      specific_actions: generate_tone_actions(goal, found),
      examples: generate_tone_examples(goal),
      effort_level: "medium"
    }
  ]
end

# Concrete rewrite actions for moving from each detected tone to the target.
def generate_tone_actions(target_tone, current_tones)
  playbook = {
    "professional" => {
      "casual" => ["Replace contractions with full forms", "Use more formal vocabulary", "Structure sentences more formally"],
      "friendly" => ["Maintain warmth while adding authority", "Use industry terminology appropriately"]
    },
    "friendly" => {
      "formal" => ["Use conversational language", "Add personal pronouns", "Include relatable examples"],
      "professional" => ["Soften technical language", "Add warmth to explanations"]
    },
    "casual" => {
      "formal" => ["Use contractions where appropriate", "Simplify complex sentences", "Add colloquialisms"],
      "professional" => ["Relax the tone while maintaining credibility", "Use everyday language"]
    }
  }

  current_tones.flat_map { |tone| playbook.dig(target_tone, tone) || [] }.uniq
end

# Before/after rewrite examples illustrating the target tone.
def generate_tone_examples(target_tone)
  samples = {
    "professional" => [
      { before: "We're gonna help you out!", after: "We will assist you with your needs." },
      { before: "Check this out!", after: "Please review the following information." }
    ],
    "friendly" => [
      { before: "The user must complete the form.", after: "You'll need to fill out a quick form." },
      { before: "This is required.", after: "We'll need this from you." }
    ],
    "casual" => [
      { before: "We are pleased to announce", after: "Hey, we've got some great news" },
      { before: "Please be advised", after: "Just wanted to let you know" }
    ]
  }

  samples.fetch(target_tone, [])
end
-
-
# Builds a single critical suggestion covering all banned-word violations.
def generate_vocabulary_suggestions(violations)
  flagged = violations.flat_map { |v| v[:details] }.uniq

  [
    {
      type: "vocabulary_replacement",
      priority: "critical",
      title: "Replace prohibited terminology",
      description: "Remove or replace words that conflict with brand guidelines",
      specific_actions: [
        "Review and replace all instances of banned words",
        "Update content to use approved brand terminology",
        "Create a glossary of preferred alternatives"
      ],
      word_replacements: generate_word_replacements(flagged),
      effort_level: "low"
    }
  ]
end

# Maps each banned word to candidate replacements, preferring the brand's own
# preferred_terms metadata.
def generate_word_replacements(banned_words)
  preferred = brand.messaging_framework&.metadata&.dig("preferred_terms") || {}

  banned_words.each_with_object({}) do |word, mapping|
    mapping[word] = find_alternatives_for_word(word, preferred)
  end
end

# Alternatives for one word: brand mapping first, then a small built-in
# thesaurus, then a manual-review placeholder.
def find_alternatives_for_word(word, preferred_terms)
  return preferred_terms[word] if preferred_terms[word]

  fallbacks = {
    "cheap" => ["affordable", "value-priced", "economical"],
    "expensive" => ["premium", "investment", "high-value"],
    "problem" => ["challenge", "opportunity", "situation"],
    "failure" => ["learning experience", "setback", "area for improvement"]
  }

  fallbacks[word.downcase] || ["[Review context for appropriate alternative]"]
end
-
-
# Suggests adding the mandatory brand elements the content is missing.
def generate_element_suggestions(violations)
  absent = violations.map { |v| v[:details][:category] }.uniq

  [
    {
      type: "content_addition",
      priority: "high",
      title: "Add required brand elements",
      description: "Include mandatory elements missing from the content",
      specific_actions: absent.map { |element| "Add #{element}" },
      templates: generate_element_templates(absent),
      effort_level: "medium"
    }
  ]
end

# Starter text for each missing element, sourced from brand assets where
# available.
def generate_element_templates(elements)
  known = {
    "tagline" => brand.messaging_framework&.taglines&.dig("primary"),
    "disclaimer" => brand.brand_guidelines.by_category("legal").first&.rule_content,
    "contact" => generate_contact_template,
    "cta" => generate_cta_template
  }

  elements.each_with_object({}) do |element, templates|
    templates[element] = known[element] || "[Custom content required]"
  end
end
-
-
# Suggests simplifying or sophisticating the copy depending on whether the
# measured grade level sits above or below the brand's target.
def generate_readability_suggestions(violations)
  details = violations.first[:details]
  measured = details[:current_grade]
  target = details[:target_grade]

  suggestion =
    if measured > target
      {
        type: "simplification",
        priority: "medium",
        title: "Simplify content for target audience",
        description: "Reduce complexity to match reading level #{target}",
        specific_actions: [
          "Shorten sentences (aim for 15-20 words average)",
          "Replace complex words with simpler alternatives",
          "Break up long paragraphs",
          "Use active voice",
          "Add subheadings for better scanning"
        ],
        examples: generate_simplification_examples,
        effort_level: "high"
      }
    else
      {
        type: "sophistication",
        priority: "medium",
        title: "Enhance content sophistication",
        description: "Increase complexity to match reading level #{target}",
        specific_actions: [
          "Use more varied sentence structures",
          "Incorporate industry-specific terminology",
          "Add nuanced explanations",
          "Develop ideas more thoroughly"
        ],
        effort_level: "medium"
      }
    end

  [suggestion]
end

# Canned before/after pairs demonstrating plainer phrasing.
def generate_simplification_examples
  [
    {
      before: "The implementation of our comprehensive solution necessitates a thorough evaluation of existing infrastructure.",
      after: "To use our solution, we need to review your current setup."
    },
    {
      before: "Utilize this functionality to optimize your workflow efficiency.",
      after: "Use this feature to work faster."
    }
  ]
end
-
-
# Suggests tightening brand-voice consistency, surfacing the attributes the
# analysis found missing.
def generate_voice_suggestions(violations)
  details = violations.first[:details]
  alignment_score = details[:alignment_score] # read for parity; not currently reported
  absent = details[:missing_elements] || []

  [
    {
      type: "brand_voice_alignment",
      priority: "high",
      title: "Strengthen brand voice consistency",
      description: "Align content more closely with established brand personality",
      specific_actions: [
        "Incorporate brand personality traits throughout",
        "Use brand-specific phrases and expressions",
        "Mirror the brand's communication style",
        "Include brand storytelling elements"
      ],
      voice_checklist: generate_voice_checklist,
      missing_elements: absent,
      effort_level: "high"
    }
  ]
end

# One yes/no check per configured brand-voice attribute.
def generate_voice_checklist
  brand.brand_voice_attributes.flat_map do |category, attributes|
    attributes.map do |key, value|
      {
        attribute: "#{category}.#{key}",
        target: value,
        check: "Does the content reflect #{value}?"
      }
    end
  end
end
-
-
# Suggests replacing off-palette colors with the closest brand colors.
def generate_color_suggestions(violations)
  offending = violations.flat_map { |v| v[:details][:non_compliant_colors] }.uniq

  [
    {
      type: "color_correction",
      priority: "high",
      title: "Align colors with brand palette",
      description: "Replace non-brand colors with approved alternatives",
      specific_actions: [
        "Update all color values to match brand guidelines",
        "Ensure proper color usage hierarchy",
        "Maintain color consistency across all elements"
      ],
      color_mappings: generate_color_mappings(offending),
      effort_level: "low"
    }
  ]
end

# Maps each offending color to its closest approved brand color.
def generate_color_mappings(non_compliant_colors)
  palette = brand.primary_colors + brand.secondary_colors

  non_compliant_colors.each_with_object({}) do |color, mappings|
    mappings[color] = find_closest_brand_color(color, palette)
  end
end

# Closest brand color to +color+, returned as { color:, distance: }.
#
# FIX: with an empty palette this used to return bare nil
# (`brand_colors.first` of an empty array) while every other path returns a
# hash — downstream consumers such as generate_color_script then crashed on
# nil[:color]. The hash shape is now consistent on every path.
def find_closest_brand_color(color, brand_colors)
  return { color: nil, distance: nil } if brand_colors.empty?

  closest = brand_colors.min_by { |candidate| color_distance(color, candidate) }

  {
    color: closest,
    distance: color_distance(color, closest).round(2)
  }
end

# Placeholder metric — always 0.0 until a real color-distance calculation
# (e.g. CIE76 delta-E) is implemented.
def color_distance(color1, color2)
  0.0
end
-
-
# Suggests swapping unapproved fonts for brand typography.
def generate_typography_suggestions(violations)
  offending = violations.flat_map { |v| v[:details][:non_compliant_fonts] }.uniq

  [
    {
      type: "typography_alignment",
      priority: "medium",
      title: "Update typography to brand standards",
      description: "Use only approved brand fonts",
      specific_actions: [
        "Replace non-brand fonts with approved alternatives",
        "Ensure proper font hierarchy",
        "Apply consistent font sizing and spacing"
      ],
      font_mappings: generate_font_mappings(offending),
      effort_level: "medium"
    }
  ]
end

# Maps each offending font to a brand-approved replacement.
def generate_font_mappings(non_compliant_fonts)
  approved = brand.font_families

  non_compliant_fonts.each_with_object({}) do |font, mappings|
    mappings[font] = suggest_brand_font(font, approved)
  end
end

# Picks a brand font in the same broad category (serif / sans-serif /
# monospace) as +font+, defaulting to sans-serif for unknown fonts.
def suggest_brand_font(font, brand_fonts)
  classified = {
    serif: ["Georgia", "Times New Roman", "Garamond"],
    sans_serif: ["Arial", "Helvetica", "Verdana"],
    monospace: ["Courier", "Consolas", "Monaco"]
  }

  bucket = classified.find { |_, members| members.include?(font) }&.first || :sans_serif

  brand_fonts[bucket.to_s] || brand_fonts["primary"] || "Use primary brand font"
end
-
-
# Fallback suggestion builder for violation types with no dedicated handler.
def generate_generic_suggestions(violations)
  violations.map do |v|
    {
      type: "compliance_fix",
      priority: v[:severity],
      title: "Address: #{v[:message]}",
      description: "Fix compliance issue",
      specific_actions: ["Review and correct the identified issue"],
      effort_level: "medium"
    }
  end
end
-
-
# Improvement ideas derived from analysis output rather than violations.
def generate_proactive_suggestions
  ideas = []
  ideas.concat(generate_nlp_based_suggestions) if analysis_results[:nlp_analysis]
  ideas.concat(generate_visual_based_suggestions) if analysis_results[:visual_analysis]
  ideas
end

# Suggestions mined from the NLP analysis (tone confidence, keyword density).
def generate_nlp_based_suggestions
  nlp = analysis_results[:nlp_analysis]
  ideas = []

  # Weak tone confidence -> recommend reinforcing the brand tone.
  # NOTE(review): assumes nlp[:tone] is always present — confirm upstream.
  if nlp[:tone][:confidence] < 0.8
    ideas << {
      type: "tone_strengthening",
      priority: "low",
      title: "Strengthen brand tone consistency",
      description: "Make the brand tone more prominent throughout the content",
      specific_actions: [
        "Use more characteristic brand expressions",
        "Maintain consistent tone throughout all sections",
        "Avoid tone shifts mid-content"
      ],
      effort_level: "medium"
    }
  end

  if nlp[:keyword_density]
    underused = nlp[:keyword_density][:keyword_densities].select do |_, data|
      data[:density] < data[:optimal_range][:min]
    end

    if underused.any?
      ideas << {
        type: "keyword_optimization",
        priority: "low",
        title: "Optimize keyword usage",
        description: "Increase usage of important brand keywords",
        keywords_to_increase: underused.keys,
        effort_level: "low"
      }
    end
  end

  ideas
end

# Visual-analysis-driven suggestions (not yet implemented).
def generate_visual_based_suggestions
  []
end
-
-
# Orders suggestions critical-first and drops duplicates (same type + title,
# first occurrence wins).
#
# FIX: an unrecognized priority string used to raise NoMethodError (unary
# minus on nil); unknown priorities now sort last via a default weight of 0.
def prioritize_suggestions(suggestions)
  priority_weights = {
    "critical" => 1000,
    "high" => 100,
    "medium" => 10,
    "low" => 1
  }

  sorted = suggestions.sort_by do |suggestion|
    -priority_weights.fetch(suggestion[:priority], 0)
  end

  sorted.uniq { |s| [s[:type], s[:title]] }
end
-
-
# Decorates each suggestion with a step-by-step guide, a time estimate and —
# where possible — an automation script.
def add_implementation_guidance(suggestions)
  suggestions.map do |suggestion|
    suggestion[:implementation_guide] = generate_implementation_guide(suggestion)
    suggestion[:estimated_time] = estimate_implementation_time(suggestion)
    suggestion[:automation_possible] = can_automate?(suggestion)
    suggestion[:automation_script] = generate_automation_script(suggestion) if suggestion[:automation_possible]
    suggestion
  end
end

# Picks the guide builder matching the suggestion type.
def generate_implementation_guide(suggestion)
  case suggestion[:type]
  when "tone_adjustment" then generate_tone_implementation_guide(suggestion)
  when "vocabulary_replacement" then generate_vocabulary_implementation_guide(suggestion)
  when "content_addition" then generate_content_implementation_guide(suggestion)
  else generate_generic_implementation_guide(suggestion)
  end
end

# Guide for tone-adjustment work.
def generate_tone_implementation_guide(suggestion)
  {
    steps: [
      "Review current content tone using the provided examples",
      "Identify sections that need adjustment",
      "Apply the specific actions listed",
      "Read through the entire content to ensure consistency",
      "Test with sample audience if possible"
    ],
    tools: ["Grammar checker", "Readability analyzer", "Brand voice guide"],
    checkpoints: [
      "All contractions addressed (if formalizing)",
      "Vocabulary matches target tone",
      "Sentence structure aligns with tone",
      "Overall feel matches brand voice"
    ]
  }
end

# Guide for banned-word replacement work.
def generate_vocabulary_implementation_guide(suggestion)
  {
    steps: [
      "Use find-and-replace for each banned word",
      "Review context for each replacement",
      "Ensure replacements maintain sentence flow",
      "Update any related phrases or variations",
      "Document replacements for future reference"
    ],
    tools: ["Text editor with find-replace", "Brand terminology guide"],
    checkpoints: [
      "All banned words replaced",
      "Replacements fit context",
      "Content still reads naturally",
      "Brand voice maintained"
    ]
  }
end

# Guide for adding missing brand elements.
def generate_content_implementation_guide(suggestion)
  {
    steps: [
      "Locate appropriate positions for missing elements",
      "Use provided templates as starting points",
      "Customize templates to fit content context",
      "Ensure smooth integration with existing content",
      "Verify all required elements are included"
    ],
    tools: ["Brand element templates", "Content guidelines"],
    checkpoints: [
      "All required elements present",
      "Elements properly formatted",
      "Natural integration achieved",
      "Brand consistency maintained"
    ]
  }
end

# Catch-all guide: reuses the suggestion's own action list as steps.
def generate_generic_implementation_guide(suggestion)
  {
    steps: suggestion[:specific_actions],
    tools: ["Brand guidelines", "Style guide"],
    checkpoints: ["Issue resolved", "Brand compliance achieved"]
  }
end
-
-
# Rough time estimate in minutes, scaled up for long action lists and
# discounted when the work can be automated.
def estimate_implementation_time(suggestion)
  minutes = { "low" => 15, "medium" => 45, "high" => 120 }.fetch(suggestion[:effort_level], 30)
  minutes *= 1.5 if suggestion[:specific_actions].length > 5
  minutes *= 0.3 if suggestion[:automation_possible]

  {
    minutes: minutes.round,
    human_readable: format_time(minutes)
  }
end

# "#{n} minutes" below an hour, otherwise hours to one decimal place.
def format_time(minutes)
  return "#{minutes.round} minutes" if minutes < 60

  "#{(minutes / 60.0).round(1)} hours"
end

# Whether a mechanical script can carry out the suggestion.
def can_automate?(suggestion)
  %w[vocabulary_replacement color_correction typography_alignment].include?(suggestion[:type])
end
-
-
def generate_automation_script(suggestion)
-
case suggestion[:type]
-
when "vocabulary_replacement"
-
generate_replacement_script(suggestion)
-
when "color_correction"
-
generate_color_script(suggestion)
-
when "typography_alignment"
-
generate_typography_script(suggestion)
-
else
-
nil
-
end
-
end
-
-
# Builds a find/replace script from :word_replacements (word => alternatives);
# the first alternative wins. Replacements are whole-word, case-insensitive.
def generate_replacement_script(suggestion)
  rules = suggestion[:word_replacements].map do |word, alternatives|
    { find: word, replace: alternatives.first, case_sensitive: false, whole_word: true }
  end

  { type: "text_replacement", description: "Automated word replacement script", script: rules }
end

# Builds a CSS color-swap script from :color_mappings (old => { color: new }).
def generate_color_script(suggestion)
  rules = suggestion[:color_mappings].map do |old_color, new_color_data|
    { find: old_color, replace: new_color_data[:color], contexts: ["css", "style attributes"] }
  end

  { type: "css_replacement", description: "Automated color replacement for CSS", script: rules }
end

# Builds a font-swap script from :font_mappings (old font => new font),
# preserving weight and style of the original setting.
def generate_typography_script(suggestion)
  rules = suggestion[:font_mappings].map do |old_font, new_font|
    { find: old_font, replace: new_font, preserve_weight: true, preserve_style: true }
  end

  { type: "font_replacement", description: "Automated font replacement", script: rules }
end
-
-
# Fix generation methods
-
# Replaces each banned word (violation[:details]) in the content with the
# first suggested alternative, matching whole words case-insensitively.
# Returns { fixed_content:, changes_made:, confidence: }.
def fix_banned_words(violation, content)
  replacements = generate_word_replacements(violation[:details])

  fixed = replacements.reduce(content.dup) do |text, (word, alternatives)|
    text.gsub(/\b#{Regexp.escape(word)}\b/i, alternatives.first)
  end

  { fixed_content: fixed, changes_made: replacements, confidence: 0.9 }
end
-
-
# Asks the LLM to rewrite the content in the tone the brand expects
# (violation[:details][:expected]). Moderate confidence (0.7) since tone
# rewrites are subjective.
def fix_tone_mismatch(violation, content)
  expected_tone = violation[:details][:expected]

  rewritten = @llm_service.analyze(
    build_tone_fix_prompt(content, expected_tone),
    { temperature: 0.5, max_tokens: content.length + 500 }
  )

  {
    fixed_content: rewritten,
    changes_made: ["Adjusted tone to be more #{expected_tone}"],
    confidence: 0.7
  }
end
-
-
# Inserts a template for the missing element (violation[:details][:category]).
# Disclaimers and footers are appended; everything else is prepended.
def fix_missing_element(violation, content)
  element = violation[:details][:category]
  template = generate_element_templates([element])[element]

  fixed_content =
    if %w[disclaimer footer].include?(element)
      "#{content}\n\n#{template}"
    else
      "#{template}\n\n#{content}"
    end

  {
    fixed_content: fixed_content,
    changes_made: ["Added required #{element}"],
    confidence: 0.8
  }
end
-
-
# Rewrites content toward the target reading grade via the LLM.
# Lowest-confidence targeted fix (0.6): grade-level rewrites are hard to
# verify automatically.
def fix_readability(violation, content)
  current_grade = violation[:details][:current_grade]
  target_grade = violation[:details][:target_grade]

  rewritten = @llm_service.analyze(
    build_readability_fix_prompt(content, current_grade, target_grade),
    { temperature: 0.3, max_tokens: content.length + 500 }
  )

  {
    fixed_content: rewritten,
    changes_made: ["Adjusted readability from grade #{current_grade} to #{target_grade}"],
    confidence: 0.6
  }
end

# Last-resort fix: asks the LLM to resolve the violation generically.
# Confidence 0.5 — callers should treat the result as a draft.
def generate_ai_fix(violation, content)
  rewritten = @llm_service.analyze(
    build_generic_fix_prompt(violation, content),
    { temperature: 0.4, max_tokens: content.length + 500 }
  )

  {
    fixed_content: rewritten,
    changes_made: ["Applied AI-generated fix for #{violation[:type]}"],
    confidence: 0.5
  }
end
-
-
# Prompt builders
-
# Builds the LLM prompt asking for 3-5 brand-aligned rephrasings of a flagged
# phrase. context supplies :content_type and :audience; the brand's voice
# attributes are serialized into the prompt. The response is expected as JSON
# (see parse_alternatives_response).
def build_alternatives_prompt(phrase, context)
  brand_voice = brand.brand_voice_attributes

  <<~PROMPT
    Generate alternative phrasings for: "#{phrase}"

    Context:
    Content Type: #{context[:content_type]}
    Target Audience: #{context[:audience]}
    Brand Voice: #{brand_voice.to_json}

    Provide 3-5 alternatives that:
    1. Maintain the same meaning
    2. Align with brand voice
    3. Fit the context
    4. Vary in style/approach

    Format as JSON:
    {
      "alternatives": [
        {
          "text": "alternative phrase",
          "style": "formal|casual|technical|friendly",
          "best_for": "situation where this works best"
        }
      ]
    }
  PROMPT
end

# Prompt for rewriting content in target_tone while preserving facts and
# structure. Expects the raw rewritten text back (no JSON).
def build_tone_fix_prompt(content, target_tone)
  <<~PROMPT
    Rewrite the following content to have a #{target_tone} tone:

    #{content}

    Guidelines:
    - Maintain all factual information
    - Keep the same structure and flow
    - Adjust vocabulary and sentence structure
    - Ensure consistent #{target_tone} tone throughout

    Return only the rewritten content.
  PROMPT
end

# Prompt for moving content between reading grade levels. Direction is
# derived from the grades: above target -> "simplify", below -> "sophisticate".
def build_readability_fix_prompt(content, current_grade, target_grade)
  direction = current_grade > target_grade ? "simplify" : "sophisticate"

  <<~PROMPT
    #{direction.capitalize} the following content from grade level #{current_grade} to #{target_grade}:

    #{content}

    Guidelines:
    - Maintain all key information
    - #{direction == "simplify" ? "Use shorter sentences and simpler words" : "Use more complex sentence structures and vocabulary"}
    - Keep the same overall message
    - Ensure natural flow

    Return only the adjusted content.
  PROMPT
end

# Generic fix prompt used when no violation-specific prompt exists; embeds
# the violation message, type and serialized details alongside the content.
def build_generic_fix_prompt(violation, content)
  <<~PROMPT
    Fix the following compliance issue in the content:

    Issue: #{violation[:message]}
    Type: #{violation[:type]}
    Details: #{violation[:details].to_json}

    Content:
    #{content}

    Guidelines:
    - Address the specific issue identified
    - Maintain content meaning and flow
    - Follow brand guidelines
    - Make minimal necessary changes

    Return only the fixed content.
  PROMPT
end
-
-
# Extracts the :alternatives array from an LLM JSON response.
# Returns [] for nil input, unparseable JSON, or a payload without the key.
def parse_alternatives_response(response)
  return [] unless response

  JSON.parse(response, symbolize_names: true)[:alternatives] || []
rescue JSON::ParserError
  []
end
-
-
# Placeholder contact block; [email] and [phone] are filled in by editors.
def generate_contact_template
  "Contact us at [email] or call [phone]"
end

# Call-to-action template using the brand's configured primary CTA,
# falling back to "Learn More" when none is set.
def generate_cta_template
  cta = brand.messaging_framework&.metadata&.dig("primary_cta") || "Learn More"
  "#{cta} →"
end
-
end
-
end
-
end
-
module Branding
-
module Compliance
-
class VisualValidator < BaseValidator
-
SUPPORTED_FORMATS = %w[image/jpeg image/png image/gif image/webp image/svg+xml].freeze
-
-
COLOR_TOLERANCE = 15 # Delta E tolerance for color matching
-
-
# options may carry :visual_data (pre-extracted image metadata hash driving
# all checks below) and :llm_service (injectable; defaults to a new
# LlmService). brand/content handling is delegated to BaseValidator via super.
def initialize(brand, content, options = {})
  super
  @visual_data = options[:visual_data] || {}
  @llm_service = options[:llm_service] || LlmService.new
end

# Runs every visual compliance check and returns collected findings as
# { violations:, suggestions: }. NOTE: returns nil (not the hash) when the
# content is not visual — callers must handle both shapes.
def validate
  return unless visual_content?

  # Validate colors
  check_color_compliance

  # Validate typography (if text is present)
  check_typography_compliance

  # Validate logo usage
  check_logo_compliance

  # Validate composition and layout
  check_composition_compliance

  # Validate image quality
  check_quality_standards

  # Check accessibility
  check_visual_accessibility

  { violations: @violations, suggestions: @suggestions }
end

# LLM-based image analysis, cached per image_data[:id] via cached_result
# (provided by BaseValidator — TODO confirm cache key semantics).
# Returns the parsed JSON analysis, or nil on parse failure.
def analyze_image(image_data)
  cached_result("visual_analysis:#{image_data[:id]}") do
    prompt = build_visual_analysis_prompt(image_data)

    response = @llm_service.analyze(prompt, {
      json_response: true,
      temperature: 0.3,
      system_message: "You are an expert visual brand compliance analyst."
    })

    parse_json_response(response)
  end
end
-
-
private
-
-
# True when visual metadata was supplied or the declared content type is
# inherently visual.
def visual_content?
  return true if @visual_data.present?

  content_type_visual?
end

# Checks options[:content_type] against the visual content-type whitelist.
def content_type_visual?
  declared = options[:content_type]
  declared ? %w[image video infographic logo banner].include?(declared) : false
end
-
-
# Runs all color checks against @visual_data[:colors]: palette membership
# for primary/secondary sets, harmony, and brand-color dominance.
# Fix: the original assigned the per-set compliance results to locals
# (primary_compliant/secondary_compliant) that were never read — dead code
# removed; the calls record violations as a side effect.
def check_color_compliance
  return unless @visual_data[:colors].present?

  detected_colors = @visual_data[:colors]
  brand_colors = {
    primary: brand.primary_colors,
    secondary: brand.secondary_colors
  }

  # Flag detected colors that fall outside the brand palette.
  check_color_set_compliance(detected_colors[:primary] || [], brand_colors[:primary], "primary")
  check_color_set_compliance(detected_colors[:secondary] || [], brand_colors[:secondary], "secondary")

  # Check color harmony across everything detected.
  check_color_harmony(detected_colors)

  # Ensure brand colors dominate the composition.
  check_brand_color_dominance(detected_colors, brand_colors)
end
-
-
# Records a violation for any detected color not within tolerance of the
# brand's palette for color_type ("primary"/"secondary"). Primary-set
# misses are "high" severity. Returns true when compliant (or when the
# brand defines no colors for the set), false otherwise.
def check_color_set_compliance(detected_colors, brand_colors, color_type)
  return true if brand_colors.empty?

  off_palette = detected_colors.reject { |detected| color_matches_any?(detected, brand_colors) }
  return true if off_palette.empty?

  add_violation(
    type: "color_violation",
    severity: color_type == "primary" ? "high" : "medium",
    message: "Non-brand #{color_type} colors detected",
    details: {
      non_compliant_colors: off_palette,
      expected_colors: brand_colors,
      color_type: color_type
    }
  )
  false
end
-
-
# True when +color+ is within COLOR_TOLERANCE (Delta E) of any palette entry.
def color_matches_any?(color, color_set)
  color_set.any? do |brand_color|
    color_distance(color, brand_color) <= COLOR_TOLERANCE
  end
end

# Delta E (CIE76) distance between two colors, each in any format
# parse_color understands. ~2.3 is roughly a just-noticeable difference.
def color_distance(color1, color2)
  lab1 = rgb_to_lab(parse_color(color1))
  lab2 = rgb_to_lab(parse_color(color2))

  Math.sqrt(
    (lab2[:l] - lab1[:l]) ** 2 +
    (lab2[:a] - lab1[:a]) ** 2 +
    (lab2[:b] - lab1[:b]) ** 2
  )
end

# Parses "#rrggbb", shorthand "#rgb", and "rgb()/rgba()" strings into
# { r:, g:, b: } with 0-255 integer channels.
# Fixes: 3-digit hex shorthand previously decoded to garbage channels, and
# a malformed rgb() string raised NoMethodError on the nil MatchData.
# Unrecognized input (including named colors) falls back to black.
def parse_color(color)
  if color.start_with?('#')
    hex = color.delete('#')
    # Expand shorthand #rgb to #rrggbb (CSS semantics: each digit doubled).
    hex = hex.chars.map { |c| c * 2 }.join if hex.length == 3
    {
      r: hex[0..1].to_i(16),
      g: hex[2..3].to_i(16),
      b: hex[4..5].to_i(16)
    }
  elsif color.start_with?('rgb')
    matches = color.match(/rgba?\((\d+),\s*(\d+),\s*(\d+)/)
    if matches
      { r: matches[1].to_i, g: matches[2].to_i, b: matches[3].to_i }
    else
      # Malformed rgb() string — degrade to black instead of crashing.
      { r: 0, g: 0, b: 0 }
    end
  else
    # Named color — would need a lookup table; treated as black.
    { r: 0, g: 0, b: 0 }
  end
end

# Converts 0-255 RGB to CIE Lab: sRGB gamma linearization, then the D65
# 2-degree-observer XYZ matrix, then the standard Lab transform.
def rgb_to_lab(rgb)
  # Convert RGB to XYZ
  r = rgb[:r] / 255.0
  g = rgb[:g] / 255.0
  b = rgb[:b] / 255.0

  # Gamma correction
  r = r > 0.04045 ? ((r + 0.055) / 1.055) ** 2.4 : r / 12.92
  g = g > 0.04045 ? ((g + 0.055) / 1.055) ** 2.4 : g / 12.92
  b = b > 0.04045 ? ((b + 0.055) / 1.055) ** 2.4 : b / 12.92

  # Observer = 2°, Illuminant = D65
  x = (r * 0.4124 + g * 0.3576 + b * 0.1805) * 100
  y = (r * 0.2126 + g * 0.7152 + b * 0.0722) * 100
  z = (r * 0.0193 + g * 0.1192 + b * 0.9505) * 100

  # Convert XYZ to Lab (normalize by D65 white point)
  x = x / 95.047
  y = y / 100.000
  z = z / 108.883

  x = x > 0.008856 ? x ** (1.0/3.0) : (7.787 * x + 16.0/116.0)
  y = y > 0.008856 ? y ** (1.0/3.0) : (7.787 * y + 16.0/116.0)
  z = z > 0.008856 ? z ** (1.0/3.0) : (7.787 * z + 16.0/116.0)

  {
    l: (116 * y) - 16,
    a: 500 * (x - y),
    b: 200 * (y - z)
  }
end
-
-
# Flags pairs of detected colors that are likely to clash. Low severity —
# harmony is advisory, not a hard brand rule.
def check_color_harmony(detected_colors)
  palette = (detected_colors[:primary] || []) + (detected_colors[:secondary] || [])
  return if palette.length < 2

  clashing_pairs = palette.combination(2).select { |c1, c2| colors_clash?(c1, c2) }
  return if clashing_pairs.empty?

  add_violation(
    type: "color_harmony",
    severity: "low",
    message: "Color combinations may clash",
    details: {
      clashing_pairs: clashing_pairs,
      suggestion: "Consider adjusting color combinations for better harmony"
    }
  )
end

# Heuristic clash test: colors that are nearly-but-not-quite identical read
# as "muddy"; saturated complementary pairs vibrate against each other.
def colors_clash?(color1, color2)
  distance = color_distance(color1, color2)
  return true if distance > 5 && distance < 20

  lab1 = rgb_to_lab(parse_color(color1))
  lab2 = rgb_to_lab(parse_color(color2))
  complementary_colors?(lab1, lab2) && high_saturation?(lab1) && high_saturation?(lab2)
end

# Roughly complementary = Lab hue angles ~180 degrees apart (150-210 window).
def complementary_colors?(lab1, lab2)
  hue1 = Math.atan2(lab1[:b], lab1[:a])
  hue2 = Math.atan2(lab2[:b], lab2[:a])
  degrees = (hue1 - hue2).abs * 180 / Math::PI

  degrees > 150 && degrees < 210
end

# Chroma (saturation in Lab space) above 50 counts as highly saturated.
def high_saturation?(lab)
  Math.sqrt(lab[:a] ** 2 + lab[:b] ** 2) > 50
end
-
-
# Verifies brand colors dominate the composition: below 60% coverage is a
# violation, 60-70% earns a soft suggestion. Requires per-color coverage
# data in @visual_data[:color_percentages].
def check_brand_color_dominance(detected_colors, brand_colors)
  return unless @visual_data[:color_percentages]

  coverage = calculate_brand_color_percentage(detected_colors, brand_colors)

  if coverage < 60
    add_violation(
      type: "brand_color_dominance",
      severity: "medium",
      message: "Brand colors not dominant enough",
      details: {
        brand_color_percentage: coverage,
        recommendation: "Brand colors should comprise at least 60% of the visual"
      }
    )
  elsif coverage < 70
    add_suggestion(
      type: "brand_color_enhancement",
      message: "Consider increasing brand color prominence",
      details: {
        current_percentage: coverage,
        target_percentage: 70
      }
    )
  end
end

# Sums the coverage percentages of every detected color that matches the
# brand palette (primary + secondary). detected_colors is unused here; the
# percentages map is the source of truth.
def calculate_brand_color_percentage(detected_colors, brand_colors)
  palette = brand_colors[:primary] + brand_colors[:secondary]

  @visual_data[:color_percentages].sum do |color, percentage|
    color_matches_any?(color, palette) ? percentage : 0
  end
end
-
-
# Typography checks driven by @visual_data[:typography]: font whitelist,
# hierarchy depth, and legibility score.
def check_typography_compliance
  return unless @visual_data[:typography].present?

  detected_fonts = @visual_data[:typography][:fonts] || []
  # brand.font_families appears to be a Hash of role => font name(s); all
  # values flattened form the allowed set — TODO confirm shape.
  brand_fonts = brand.font_families

  non_compliant_fonts = detected_fonts - brand_fonts.values.flatten

  if non_compliant_fonts.any?
    add_violation(
      type: "typography_violation",
      severity: "medium",
      message: "Non-brand fonts detected",
      details: {
        non_compliant_fonts: non_compliant_fonts,
        brand_fonts: brand_fonts
      }
    )
  end

  # Check font hierarchy
  check_font_hierarchy(detected_fonts)

  # Check text legibility
  check_text_legibility
end

# More than 3 distinct fonts weakens hierarchy — low-severity violation.
def check_font_hierarchy(detected_fonts)
  if detected_fonts.length > 3
    add_violation(
      type: "font_hierarchy",
      severity: "low",
      message: "Too many font variations",
      details: {
        font_count: detected_fonts.length,
        recommendation: "Limit to 2-3 font variations for better hierarchy"
      }
    )
  end
end

# Legibility score < 0.6 is a violation, 0.6-0.8 a suggestion.
# NOTE(review): indexes @visual_data[:typography] without a nil guard —
# safe only because check_typography_compliance verified presence first.
def check_text_legibility
  return unless @visual_data[:typography][:legibility_score]

  score = @visual_data[:typography][:legibility_score]

  if score < 0.6
    add_violation(
      type: "text_legibility",
      severity: "high",
      message: "Text legibility issues detected",
      details: {
        legibility_score: score,
        issues: @visual_data[:typography][:legibility_issues] || []
      }
    )
  elsif score < 0.8
    add_suggestion(
      type: "legibility_improvement",
      message: "Text legibility could be improved",
      details: {
        current_score: score,
        suggestions: suggest_legibility_improvements
      }
    )
  end
end

# Logo checks driven by @visual_data[:logo]: size, clear space, placement
# and unauthorized modifications.
def check_logo_compliance
  return unless @visual_data[:logo].present?

  logo_data = @visual_data[:logo]

  # Check logo size
  check_logo_size(logo_data)

  # Check logo clear space
  check_logo_clear_space(logo_data)

  # Check logo placement
  check_logo_placement(logo_data)

  # Check logo modifications
  check_logo_integrity(logo_data)
end

# Minimum size comes from the first "logo" guideline carrying min_size
# metadata, defaulting to 100 (units presumably pixels — TODO confirm).
def check_logo_size(logo_data)
  min_size = brand.brand_guidelines
    .by_category("logo")
    .find { |g| g.metadata&.dig("min_size") }
    &.metadata&.dig("min_size") || 100

  if logo_data[:size] && logo_data[:size] < min_size
    add_violation(
      type: "logo_size",
      severity: "high",
      message: "Logo is below minimum size requirements",
      details: {
        current_size: logo_data[:size],
        minimum_size: min_size
      }
    )
  end
end

# Requires clear space of at least half the logo dimension around it.
def check_logo_clear_space(logo_data)
  return unless logo_data[:clear_space_ratio]

  min_clear_space = 0.5 # Half the logo height/width

  if logo_data[:clear_space_ratio] < min_clear_space
    add_violation(
      type: "logo_clear_space",
      severity: "medium",
      message: "Insufficient clear space around logo",
      details: {
        current_ratio: logo_data[:clear_space_ratio],
        required_ratio: min_clear_space
      }
    )
  end
end

# Placement must be one of the guideline-approved positions, defaulting to
# top-left / top-center / center when no guideline specifies them.
def check_logo_placement(logo_data)
  approved_placements = brand.brand_guidelines
    .by_category("logo")
    .find { |g| g.metadata&.dig("approved_placements") }
    &.metadata&.dig("approved_placements") ||
    ["top-left", "top-center", "center"]

  if logo_data[:placement] && !approved_placements.include?(logo_data[:placement])
    add_violation(
      type: "logo_placement",
      severity: "medium",
      message: "Logo placed in non-approved position",
      details: {
        current_placement: logo_data[:placement],
        approved_placements: approved_placements
      }
    )
  end
end

# Any modification of the logo is critical — the mark must never be altered.
def check_logo_integrity(logo_data)
  if logo_data[:modified]
    modifications = logo_data[:modifications] || []

    add_violation(
      type: "logo_modification",
      severity: "critical",
      message: "Logo has been modified",
      details: {
        modifications: modifications,
        rule: "Logo must not be altered in any way"
      }
    )
  end
end
-
-
# Layout checks driven by @visual_data[:composition]: balance, whitespace
# ratio, and visual hierarchy.
def check_composition_compliance
  return unless @visual_data[:composition]

  composition = @visual_data[:composition]

  # Poor balance is advisory only (suggestion, not violation).
  if composition[:balance_score] && composition[:balance_score] < 0.6
    add_suggestion(
      type: "composition_balance",
      message: "Visual composition could be better balanced",
      details: {
        balance_score: composition[:balance_score],
        suggestions: ["Redistribute visual weight", "Align elements to grid"]
      }
    )
  end

  # Check whitespace
  check_whitespace_usage(composition)

  # Check visual hierarchy
  check_visual_hierarchy(composition)
end

# Whitespace below 20% of the canvas is a violation; above 70% earns a
# soft suggestion. Missing ratio defaults to 0, i.e. treated as cramped.
def check_whitespace_usage(composition)
  whitespace_ratio = composition[:whitespace_ratio] || 0

  if whitespace_ratio < 0.2
    add_violation(
      type: "whitespace_insufficient",
      severity: "medium",
      message: "Insufficient whitespace",
      details: {
        current_ratio: whitespace_ratio,
        recommendation: "Increase whitespace for better readability"
      }
    )
  elsif whitespace_ratio > 0.7
    add_suggestion(
      type: "whitespace_excessive",
      message: "Consider using space more efficiently",
      details: {
        current_ratio: whitespace_ratio
      }
    )
  end
end

# Hierarchy score below 0.5 is a violation; missing score defaults to 0
# and therefore also violates — NOTE(review): confirm that is intended.
def check_visual_hierarchy(composition)
  hierarchy_score = composition[:hierarchy_score] || 0

  if hierarchy_score < 0.5
    add_violation(
      type: "visual_hierarchy",
      severity: "medium",
      message: "Weak visual hierarchy",
      details: {
        hierarchy_score: hierarchy_score,
        issues: composition[:hierarchy_issues] || [],
        suggestions: [
          "Use size contrast for importance",
          "Apply consistent spacing",
          "Group related elements"
        ]
      }
    )
  end
end
-
-
# Image-quality checks driven by @visual_data[:quality]: resolution,
# compression artifacts, and file-size efficiency.
def check_quality_standards
  return unless @visual_data[:quality]

  quality = @visual_data[:quality]

  # Below 72 DPI is unusable even for web.
  if quality[:resolution] && quality[:resolution] < 72
    add_violation(
      type: "low_resolution",
      severity: "high",
      message: "Image resolution too low",
      details: {
        current_dpi: quality[:resolution],
        minimum_dpi: 72,
        recommendation: "Use images with at least 72 DPI for web, 300 DPI for print"
      }
    )
  end

  # Visible compression artifacts are advisory only.
  if quality[:compression_score] && quality[:compression_score] < 0.7
    add_suggestion(
      type: "compression_quality",
      message: "Image shows compression artifacts",
      details: {
        quality_score: quality[:compression_score],
        recommendation: "Use higher quality compression settings"
      }
    )
  end

  # Check file size
  check_file_size_optimization(quality)
end

# Suggests optimization above ~1.5 bytes per pixel (rough web heuristic).
# Fix: guard against zero-area dimensions, which previously produced an
# Infinity bytes-per-pixel value and a spurious suggestion.
def check_file_size_optimization(quality)
  return unless quality[:file_size] && quality[:dimensions]

  total_pixels = quality[:dimensions][:width] * quality[:dimensions][:height]
  return if total_pixels.zero?

  bytes_per_pixel = quality[:file_size].to_f / total_pixels

  if bytes_per_pixel > 1.5
    add_suggestion(
      type: "file_size_optimization",
      message: "Image file size could be optimized",
      details: {
        current_size: quality[:file_size],
        bytes_per_pixel: bytes_per_pixel.round(2),
        recommendation: "Consider optimizing without quality loss"
      }
    )
  end
end
-
-
# Accessibility checks: WCAG color contrast, alt text, and motion safety.
def check_visual_accessibility
  # Check color contrast
  check_color_contrast

  # Check for alt text (if applicable)
  check_alt_text

  # Check for motion/animation issues
  check_motion_accessibility
end

# Surfaces pre-computed contrast issues from @visual_data[:accessibility];
# targets WCAG AA (4.5:1 normal text, 3:1 large text).
def check_color_contrast
  return unless @visual_data[:accessibility]

  contrast_issues = @visual_data[:accessibility][:contrast_issues] || []

  if contrast_issues.any?
    add_violation(
      type: "color_contrast",
      severity: "high",
      message: "Color contrast accessibility issues",
      details: {
        issues: contrast_issues,
        wcag_level: "AA",
        recommendation: "Ensure 4.5:1 contrast for normal text, 3:1 for large text"
      }
    )
  end
end

# Alt text is only enforced when the caller opts in via
# options[:requires_alt_text]. Missing text is a violation; text under 10
# characters only earns a suggestion.
def check_alt_text
  return unless options[:requires_alt_text]

  if @visual_data[:alt_text].blank?
    add_violation(
      type: "missing_alt_text",
      severity: "high",
      message: "Missing alternative text for accessibility",
      details: {
        recommendation: "Add descriptive alt text for screen readers"
      }
    )
  elsif @visual_data[:alt_text].length < 10
    add_suggestion(
      type: "improve_alt_text",
      message: "Alt text could be more descriptive",
      details: {
        current_length: @visual_data[:alt_text].length,
        recommendation: "Provide meaningful description of the visual content"
      }
    )
  end
end

# Motion checks per WCAG: autoplay needs a pause control (2.2.2) and
# flashing content is an outright critical violation (2.3.1).
def check_motion_accessibility
  return unless @visual_data[:has_animation]

  animation_data = @visual_data[:animation] || {}

  if animation_data[:autoplay] && !animation_data[:has_pause_control]
    add_violation(
      type: "motion_control",
      severity: "medium",
      message: "Auto-playing animation without pause control",
      details: {
        recommendation: "Provide user controls for animations",
        wcag_guideline: "2.2.2 Pause, Stop, Hide"
      }
    )
  end

  if animation_data[:flashing_detected]
    add_violation(
      type: "flashing_content",
      severity: "critical",
      message: "Flashing content detected",
      details: {
        recommendation: "Remove flashing to prevent seizures",
        wcag_guideline: "2.3.1 Three Flashes or Below Threshold"
      }
    )
  end
end
-
-
# Builds the LLM prompt for analyzing an image against brand guidelines.
# NOTE(review): image_data is not interpolated into the prompt — only the
# brand context is; presumably the image is attached by the LLM service
# call — TODO confirm.
def build_visual_analysis_prompt(image_data)
  <<~PROMPT
    Analyze this image for brand compliance based on these guidelines:

    Brand Colors:
    Primary: #{brand.primary_colors.to_json}
    Secondary: #{brand.secondary_colors.to_json}

    Brand Fonts:
    #{brand.font_families.to_json}

    Visual Guidelines:
    #{extract_visual_guidelines.to_json}

    Please analyze:
    1. Color usage and compliance
    2. Typography (if text is present)
    3. Logo usage and placement
    4. Overall composition and balance
    5. Brand consistency

    Return analysis in JSON format with detailed findings.
  PROMPT
end
-
-
# Collects the brand's visual guidelines (logo/color/typography/composition)
# into a category-keyed hash of { rule:, type:, mandatory: } entries for
# prompt serialization.
def extract_visual_guidelines
  %w[logo color typography composition].each_with_object({}) do |category, guidelines|
    guidelines[category] = brand.brand_guidelines.by_category(category).map do |g|
      { rule: g.rule_content, type: g.rule_type, mandatory: g.mandatory? }
    end
  end
end

# Canned remediation list attached to legibility suggestions.
def suggest_legibility_improvements
  [
    "Increase font size for body text",
    "Improve contrast between text and background",
    "Use simpler fonts for better readability",
    "Increase line spacing",
    "Avoid thin font weights for small text"
  ]
end
-
-
# Parses an LLM JSON response with symbol keys. Returns nil for nil input
# or unparseable JSON (logging the failure).
def parse_json_response(response)
  return nil if response.nil?

  JSON.parse(response, symbolize_names: true)
rescue JSON::ParserError
  Rails.logger.error "Failed to parse visual analysis response"
  nil
end
-
end
-
end
-
end
-
module Branding
-
class ComplianceService
-
attr_reader :brand, :content, :content_type
-
-
COMPLIANCE_THRESHOLDS = {
-
high: 0.9,
-
medium: 0.7,
-
low: 0.5
-
}.freeze
-
-
# brand: the Brand model to validate against; content: the text (or visual
# reference) under review; content_type: free-form label, defaults to
# "general" (visual types: image/video/infographic).
def initialize(brand, content, content_type = "general")
  @brand = brand
  @content = content
  @content_type = content_type
  @violations = []   # populated by add_violation during checks
  @suggestions = []  # populated by add_suggestion during checks
  @score = 0.0       # 0..1 compliance score, set by calculate_compliance_score
end

# Runs the full check pipeline and returns the structured response hash
# (see build_response). Blank content or brand short-circuits to an error
# response without running any checks.
def check_compliance
  return build_response(false, "No content provided") if content.blank?
  return build_response(false, "No brand specified") if brand.blank?

  # Run all compliance checks
  check_banned_words
  check_tone_compliance
  check_messaging_alignment
  check_style_guidelines
  check_required_elements
  check_visual_compliance if visual_content?

  # Calculate overall compliance score
  calculate_compliance_score

  build_response(true)
end
-
-
# Runs check_compliance and augments the result: compliant content gets
# :suggestions (polish ideas), non-compliant content gets :corrections
# (one per violation).
def validate_and_suggest
  result = check_compliance

  extra =
    if result[:compliant]
      { suggestions: generate_improvements }
    else
      { corrections: generate_corrections }
    end

  result.merge!(extra)
end
-
-
private
-
-
# Flags banned vocabulary via the brand's MessagingFramework scanner.
# No-op when the brand has no framework configured.
def check_banned_words
  messaging_framework = brand.messaging_framework
  return unless messaging_framework

  banned_words = messaging_framework.get_banned_words_in_text(content)

  if banned_words.any?
    add_violation(
      type: "banned_words",
      severity: "high",
      message: "Content contains banned words: #{banned_words.join(', ')}",
      details: banned_words
    )
  end
end

# Compares the heuristically detected tone of the content against the
# brand's primary tone from its latest analysis; no-op without an analysis.
def check_tone_compliance
  analysis = brand.latest_analysis
  return unless analysis

  expected_tone = analysis.voice_attributes.dig("tone", "primary")
  detected_tone = analyze_content_tone

  if tone_mismatch?(expected_tone, detected_tone)
    add_violation(
      type: "tone_mismatch",
      severity: "medium",
      message: "Content tone (#{detected_tone}) doesn't match brand tone (#{expected_tone})",
      details: {
        expected: expected_tone,
        detected: detected_tone
      }
    )
  end
end

# Scores keyword overlap between the content and the brand's key messages
# plus main value propositions: below 0.3 is a violation, 0.3-0.6 earns a
# suggestion listing up to 3 missing themes.
def check_messaging_alignment
  messaging_framework = brand.messaging_framework
  return unless messaging_framework

  key_messages = messaging_framework.key_messages.values.flatten
  value_props = messaging_framework.value_propositions["main"] || []

  alignment_score = calculate_message_alignment(key_messages + value_props)

  if alignment_score < 0.3
    add_violation(
      type: "messaging_misalignment",
      severity: "medium",
      message: "Content doesn't align well with brand key messages",
      details: {
        alignment_score: alignment_score,
        missing_themes: identify_missing_themes(key_messages)
      }
    )
  elsif alignment_score < 0.6
    add_suggestion(
      type: "messaging_improvement",
      message: "Consider incorporating more brand key messages",
      details: {
        current_alignment: alignment_score,
        suggested_themes: identify_missing_themes(key_messages).first(3)
      }
    )
  end
end

# Checks every active mandatory "style" guideline; priority >= 8 escalates
# the violation to "high".
def check_style_guidelines
  guidelines = brand.brand_guidelines.active.by_category("style")

  guidelines.each do |guideline|
    if guideline.mandatory? && !content_follows_guideline?(guideline)
      add_violation(
        type: "style_violation",
        severity: guideline.priority >= 8 ? "high" : "medium",
        message: "Violates style guideline: #{guideline.rule_content}",
        details: {
          rule_type: guideline.rule_type,
          guideline_id: guideline.id
        }
      )
    end
  end
end

# Flags each mandatory guideline whose required element is absent from the
# content (keyword-based presence check).
def check_required_elements
  required_guidelines = brand.brand_guidelines.mandatory_rules

  required_guidelines.each do |guideline|
    next if content_includes_required_element?(guideline)

    add_violation(
      type: "missing_required_element",
      severity: "high",
      message: "Missing required element: #{guideline.rule_content}",
      details: {
        guideline_id: guideline.id,
        category: guideline.category
      }
    )
  end
end

# Placeholder for visual content compliance checks (colors, fonts, logo
# usage, etc.) — see Branding::Compliance::VisualValidator for the real
# implementation; intentionally a no-op here.
def check_visual_compliance
end
-
-
# Heuristic tone detection by keyword counting ("formal"/"casual"/"neutral").
# A tone wins only when its indicator count more than doubles the other's;
# a production system would use NLP here.
def analyze_content_tone
  content_lower = content.downcase

  formal_count = %w[therefore however furthermore consequently].count { |word| content_lower.include?(word) }
  casual_count = %w[hey gonna wanna cool awesome].count { |word| content_lower.include?(word) }

  return "formal" if formal_count > casual_count * 2
  return "casual" if casual_count > formal_count * 2

  "neutral"
end
-
-
# True when the detected tone is not in the compatibility set of the
# expected tone. Unknown expected tones only accept an exact match.
def tone_mismatch?(expected, detected)
  compatibility = {
    "formal" => %w[formal professional],
    "professional" => %w[formal professional neutral],
    "friendly" => %w[friendly casual neutral],
    "casual" => %w[casual friendly]
  }

  allowed = compatibility.fetch(expected) { [expected] }
  !allowed.include?(detected)
end
-
-
# Fraction (0.0-1.0) of key messages sharing at least one word with the
# content; 0.0 for an empty message list.
def calculate_message_alignment(key_messages)
  return 0.0 if key_messages.empty?

  matched = key_messages.size - identify_missing_themes(key_messages).size
  matched.to_f / key_messages.size
end

# Key messages with NO word overlap against the content (case-insensitive,
# split on non-word characters).
def identify_missing_themes(key_messages)
  content_lower = content.downcase

  key_messages.reject do |message|
    message.downcase.split(/\W+/).any? { |word| content_lower.include?(word) }
  end
end
-
-
# Keyword-based guideline check: positive rules ("do"/"must") require at
# least one keyword present; negative rules ("dont"/"avoid") require none.
# Other rule types are treated as always satisfied.
def content_follows_guideline?(guideline)
  keywords = extract_keywords(guideline.rule_content)
  lowered = content.downcase

  case guideline.rule_type
  when "do", "must"
    keywords.any? { |keyword| lowered.include?(keyword.downcase) }
  when "dont", "avoid"
    keywords.none? { |keyword| lowered.include?(keyword.downcase) }
  else
    true
  end
end

# "must" guidelines require at least one rule keyword in the content; any
# other rule type trivially passes.
def content_includes_required_element?(guideline)
  return true unless guideline.rule_type == "must"

  lowered = content.downcase
  extract_keywords(guideline.rule_content).any? { |keyword| lowered.include?(keyword.downcase) }
end
-
-
# Lowercases text, splits on non-word characters, and drops stop words and
# tokens shorter than 3 characters.
def extract_keywords(text)
  stop_words = %w[the a an and or but in on at to for of with as by]

  text.downcase
      .split(/\W+/)
      .reject { |word| word.length < 3 || stop_words.include?(word) }
end
-
-
# Computes @score (0..1) from severity-weighted violations against an
# assumed ceiling of 10 "high" violations.
# Bug fix: the empty-violations path previously returned 1.0 WITHOUT
# assigning @score, so build_response reported score 0.0 for fully
# compliant content. Now @score is always updated.
def calculate_compliance_score
  return @score = 1.0 if @violations.empty?

  # Weight violations by severity (unknown severities count as medium).
  severity_weights = { high: 1.0, medium: 0.5, low: 0.25 }

  total_weight = @violations.sum do |violation|
    severity_weights[violation[:severity].to_sym] || 0.5
  end

  # Calculate score (0-1 scale), floored at 0.
  max_possible_violations = 10.0 # Assumed maximum
  @score = [1.0 - (total_weight / max_possible_violations), 0].max
end
-
-
# Builds polish suggestions for already-compliant content: a messaging
# nudge for mid-range scores (0.7-0.9 exclusive) and a tone refinement
# when any tone_adjustment suggestion was recorded. Returns the new
# improvements followed by the accumulated @suggestions.
def generate_improvements
  improvements = []

  # Moderate alignment: room to add brand value propositions.
  if @score > 0.7 && @score < 0.9
    improvements << {
      type: "enhance_messaging",
      suggestion: "Consider adding more brand-specific value propositions",
      priority: "low"
    }
  end

  # A recorded tone_adjustment suggestion warrants a refinement nudge.
  if @suggestions.any? { |s| s[:type] == "tone_adjustment" }
    improvements << {
      type: "refine_tone",
      suggestion: "Fine-tune the tone to better match brand voice",
      priority: "medium"
    }
  end

  improvements + @suggestions
end
-
-
# Maps each recorded violation to an actionable correction entry whose
# priority mirrors the violation's severity.
def generate_corrections
  @violations.map do |violation|
    {
      type: violation[:type],
      correction: suggest_correction_for(violation),
      priority: violation[:severity],
      details: violation[:details]
    }
  end
end

# Human-readable correction text per violation type; unknown types get a
# generic "Address issue" wrapper around the violation message.
def suggest_correction_for(violation)
  case violation[:type]
  when "banned_words"
    "Replace the following banned words: #{violation[:details].join(', ')}"
  when "tone_mismatch"
    "Adjust tone from #{violation[:details][:detected]} to #{violation[:details][:expected]}"
  when "missing_required_element"
    "Add required element: #{violation[:message]}"
  when "style_violation"
    "Follow style guideline: #{violation[:message]}"
  else
    "Address issue: #{violation[:message]}"
  end
end
-
-
# True for content types that carry visual compliance requirements.
def visual_content?
  %w[image video infographic].include?(content_type)
end

# Records a violation with a capture timestamp. severity is one of
# "high"/"medium"/"low" (weighted later by calculate_compliance_score).
def add_violation(type:, severity:, message:, details: {})
  @violations.push(
    type: type,
    severity: severity,
    message: message,
    details: details,
    timestamp: Time.current
  )
end

# Records a non-blocking suggestion with a capture timestamp.
def add_suggestion(type:, message:, details: {})
  @suggestions.push(
    type: type,
    message: message,
    details: details,
    timestamp: Time.current
  )
end
-
-
# Builds the final response hash. On failure, returns an error-shaped hash
# with empty violation/suggestion lists and a zero score.
def build_response(success, error_message = nil)
  unless success
    return {
      compliant: false,
      score: 0,
      error: error_message,
      violations: [],
      suggestions: []
    }
  end

  {
    compliant: @violations.empty?,
    score: @score,
    violations: @violations,
    suggestions: @suggestions,
    summary: compliance_summary
  }
end
-
-
# One-line human summary of the compliance outcome, banded against the
# COMPLIANCE_THRESHOLDS constant.
def compliance_summary
  return "Content is fully compliant with brand guidelines." if @violations.empty?

  if @score >= COMPLIANCE_THRESHOLDS[:high]
    "Content is highly compliant with minor adjustments needed."
  elsif @score >= COMPLIANCE_THRESHOLDS[:medium]
    "Content is moderately compliant. Several improvements recommended."
  elsif @score >= COMPLIANCE_THRESHOLDS[:low]
    "Content has compliance issues that should be addressed."
  else
    "Content has significant compliance violations requiring major revisions."
  end
end
-
end
-
end
-
module Branding
-
class ComplianceServiceV2
-
include ActiveSupport::Configurable
-
-
config_accessor :cache_store, default: Rails.cache
-
config_accessor :broadcast_violations, default: true
-
config_accessor :async_processing, default: true
-
config_accessor :max_processing_time, default: 30.seconds
-
-
attr_reader :brand, :content, :content_type, :options
-
-
COMPLIANCE_LEVELS = {
-
strict: { threshold: 0.95, tolerance: :none },
-
standard: { threshold: 0.85, tolerance: :low },
-
flexible: { threshold: 0.70, tolerance: :medium },
-
advisory: { threshold: 0.50, tolerance: :high }
-
}.freeze
-
-
# @param brand [Brand] brand whose guidelines are enforced
# @param content [String] the content to validate
# @param content_type [String] e.g. "general", "marketing_copy", "image"
# @param options [Hash] caller overrides, merged over #default_options
def initialize(brand, content, content_type = "general", options = {})
  @brand = brand
  @content = content
  @content_type = content_type
  @options = default_options.merge(options)
  @validators = []
  @results = {}

  # Populates @validators based on content type and options.
  setup_validators
end
-
-
# Runs all configured validators (asynchronously for large content when
# options[:async] is set), compiles a compliance verdict into @results and
# returns it. Any StandardError is converted into the error-shaped hash
# from #handle_error rather than propagating to the caller.
def check_compliance
  start_time = Time.current

  # Run validations based on configuration
  if options[:async] && content_large?
    check_compliance_async
  else
    check_compliance_sync
  end

  # Compile results (score, compliant flag, summary, ordering)
  compile_results

  # Generate suggestions if requested — replaces any suggestions merged in
  # by compile_results with the SuggestionEngine output.
  if options[:generate_suggestions]
    @results[:suggestions] = generate_intelligent_suggestions
  end

  # Add metadata about this run
  @results[:metadata] = {
    processing_time: Time.current - start_time,
    validators_used: @validators.map(&:class).map(&:name),
    compliance_level: options[:compliance_level],
    # NOTE(review): cache_hits is only populated by #run_validator's caching
    # path; confirm it increments as expected in production.
    cached_results_used: @results[:cache_hits] || 0
  }

  @results
rescue StandardError => e
  handle_error(e)
end
-
-
# Checks compliance, then attempts automatic fixes for any violations.
# When fixes changed the content, re-validates and returns a comparison
# hash; otherwise returns the original results annotated with the
# (unapplied) fixes.
def validate_and_fix
  original = check_compliance
  return original if original[:compliant]

  # Attempt to auto-fix violations
  fix_outcome = auto_fix_violations(original[:violations])

  unless fix_outcome[:content_changed]
    return original.merge(fixes_available: fix_outcome[:fixes])
  end

  # Re-validate the rewritten content
  @content = fix_outcome[:fixed_content]

  {
    original_results: original,
    fixes_applied: fix_outcome[:fixes],
    final_results: check_compliance,
    fixed_content: fix_outcome[:fixed_content]
  }
end
-
-
# Runs only the validators relevant to the requested aspects; aspects with
# no matching validator are silently skipped.
def check_specific_aspects(aspects)
  aspect_results = aspects.each_with_object({}) do |aspect, acc|
    validator = validator_for_aspect(aspect)
    acc[aspect] = run_validator(validator) if validator
  end

  compile_aspect_results(aspect_results)
end
-
-
# Generates (but does not apply) a fix per violation, keyed by violation id.
# Defaults to the violations gathered in the last check.
def preview_fixes(violations = nil)
  violations ||= @results[:violations] || []
  engine = Compliance::SuggestionEngine.new(brand, violations, @results)

  violations.each_with_object({}) do |violation, fixes|
    fixes[violation[:id]] = engine.generate_fix(violation, content)
  end
end
-
-
private
-
-
# Baseline option set; caller-supplied options override these in
# #initialize. Dynamic values come from the class-level Configurable
# settings (cache/async/broadcast/timeout).
def default_options
  {
    compliance_level: :standard,
    async: config.async_processing,
    generate_suggestions: true,
    real_time_updates: config.broadcast_violations,
    cache_results: true,
    # Visual validation is inferred from the content type's name
    include_visual: content_type.include?("visual") || content_type.include?("image"),
    nlp_analysis_depth: :full,
    timeout: config.max_processing_time
  }
end
-
-
# Populates @validators: the rule engine always runs; NLP and visual
# validators are added conditionally, followed by any caller-supplied
# custom validators.
def setup_validators
  @validators << Compliance::RuleEngine.new(brand)

  # NLP analysis only makes sense for non-blank string content.
  @validators << Compliance::NlpAnalyzer.new(brand, content, options) if has_text_content?

  # Visual validation requires both the flag and actual visual data.
  if options[:include_visual] && options[:visual_data]
    @validators << Compliance::VisualValidator.new(brand, content, options)
  end

  @validators.concat(options[:custom_validators]) if options[:custom_validators]
end
-
-
# Runs every validator sequentially on the current thread, folding each
# result into @results.
def check_compliance_sync
  @validators.each do |validator|
    merge_validator_results(run_validator(validator), validator)
  end
end
-
-
# Runs every validator on a Concurrent::Future and merges each result once
# it completes. A validator that does not finish within options[:timeout]
# is skipped and recorded under @results[:errors]; timed-out futures are
# NOT cancelled and keep running in the background.
#
# NOTE(review): in current concurrent-ruby, Future#wait returns self (always
# truthy), which would make the timeout branch unreachable — verify against
# the gem version in use; `future.value(options[:timeout])` with a nil check
# may be the intended call.
def check_compliance_async
  futures = @validators.map do |validator|
    Concurrent::Future.execute do
      run_validator(validator)
    end
  end

  # Wait for all validators with timeout
  futures.each_with_index do |future, index|
    if future.wait(options[:timeout])
      merge_validator_results(future.value, @validators[index])
    else
      @results[:errors] ||= []
      @results[:errors] << {
        validator: @validators[index].class.name,
        error: "Timeout exceeded"
      }
    end
  end
end
-
-
# Runs +validator+, memoizing the result in the configured cache store when
# caching is enabled. Returns the validator's result hash.
#
# FIX: the previous version incremented @results[:cache_hits] only when the
# returned hash contained a :cached flag — which no code path ever set, so
# cache hits were never counted. A hit is now detected by checking key
# existence before the fetch.
def run_validator(validator)
  return run_validator_safely(validator) unless options[:cache_results] && cache_store

  cache_key = validator_cache_key(validator)
  hit = cache_store.exist?(cache_key)

  result = cache_store.fetch(cache_key, expires_in: 5.minutes) do
    run_validator_safely(validator)
  end

  @results[:cache_hits] ||= 0
  @results[:cache_hits] += 1 if hit

  result
end
-
-
# Invokes the validator, translating any raised StandardError into an
# error-shaped result hash so one failing validator cannot abort the run.
def run_validator_safely(validator)
  # RuleEngine exposes #evaluate(content, context) instead of #validate.
  return validator.validate unless validator.is_a?(Compliance::RuleEngine)

  rule_context = {
    content_type: content_type,
    channel: options[:channel],
    audience: options[:audience]
  }
  validator.evaluate(content, rule_context)
rescue StandardError => e
  {
    error: e.message,
    validator: validator.class.name,
    violations: [],
    suggestions: []
  }
end
-
-
# Folds one validator's raw result hash into the shared @results
# accumulator. Two result shapes are supported: the validator interface
# (:violations / :suggestions) and the RuleEngine interface
# (:failed / :warnings). Results carrying :error are skipped entirely.
def merge_validator_results(result, validator)
  return if result[:error]

  # Merge violations (RuleEngine reports them under :failed)
  if result[:violations]
    @results[:violations] ||= []
    @results[:violations].concat(normalize_violations(result[:violations], validator))
  elsif result[:failed]
    # Handle RuleEngine format
    @results[:violations] ||= []
    @results[:violations].concat(convert_rule_failures(result[:failed]))
  end

  # Merge suggestions (RuleEngine warnings are downgraded to suggestions)
  if result[:suggestions]
    @results[:suggestions] ||= []
    @results[:suggestions].concat(result[:suggestions])
  elsif result[:warnings]
    # Handle RuleEngine warnings as suggestions
    @results[:suggestions] ||= []
    @results[:suggestions].concat(convert_rule_warnings(result[:warnings]))
  end

  # Store analysis payloads keyed by validator short name, e.g. "nlp_analyzer"
  if result[:analysis]
    @results[:analysis] ||= {}
    @results[:analysis][validator.class.name.demodulize.underscore] = result[:analysis]
  end

  # Track per-validator scores used later by #calculate_overall_score
  if result[:score]
    @results[:scores] ||= {}
    @results[:scores][validator.class.name.demodulize.underscore] = result[:score]
  end
end
-
-
# Stamps each violation with a stable id ("<validator>_<index>") and the
# originating validator's short name.
def normalize_violations(violations, validator)
  source = validator.class.name.demodulize.underscore

  violations.each_with_index.map do |violation, index|
    violation.merge(id: "#{source}_#{index}", validator_type: source)
  end
end
-
-
# Translates RuleEngine failure hashes into the canonical violation shape.
def convert_rule_failures(failures)
  failures.map do |failure|
    rule_id, severity, message, details = failure.values_at(:rule_id, :severity, :message, :details)
    {
      id: rule_id,
      type: "rule_violation",
      severity: severity,
      message: message,
      details: details,
      validator_type: "rule_engine"
    }
  end
end
-
-
# Downgrades RuleEngine warnings into low-priority suggestion hashes.
def convert_rule_warnings(warnings)
  warnings.map do |warning|
    message, details = warning.values_at(:message, :details)
    {
      type: "rule_warning",
      message: message,
      details: details,
      priority: "low"
    }
  end
end
-
-
# Finalizes @results after all validators ran: computes the weighted score,
# decides overall compliance against the configured level's threshold and
# tolerance, orders violations, dedupes suggestions and (optionally)
# broadcasts the outcome. Returns the mutated @results hash.
#
# NOTE(review): an unknown options[:compliance_level] makes
# COMPLIANCE_LEVELS[...] nil and the threshold lookup raises — confirm the
# option is validated upstream.
def compile_results
  violations = @results[:violations] || []
  suggestions = @results[:suggestions] || []

  # Calculate overall compliance
  compliance_level = COMPLIANCE_LEVELS[options[:compliance_level]]
  score = calculate_overall_score

  # Compliant when nothing was flagged, or when the score clears the
  # threshold AND the level's tolerance admits the remaining violations.
  @results[:compliant] = violations.empty? ||
                         (score >= compliance_level[:threshold] &&
                          allows_violations?(violations, compliance_level))

  @results[:score] = score
  @results[:summary] = generate_summary(score, violations, suggestions)
  @results[:violations] = prioritize_violations(violations)
  @results[:suggestions] = deduplicate_suggestions(suggestions)

  # Broadcast if enabled
  broadcast_results if options[:real_time_updates]

  @results
end
-
-
# Weighted mean of per-validator scores; validators without an explicit
# weight contribute 0.2. Returns 1.0 when no validator reported a score.
def calculate_overall_score
  scores = @results[:scores] || {}
  return 1.0 if scores.empty?

  # Relative importance of each validator's verdict.
  weights = {
    "rule_engine" => 0.4,
    "nlp_analyzer" => 0.35,
    "visual_validator" => 0.25
  }

  weighted_sum, total_weight = scores.reduce([0.0, 0.0]) do |(acc, total), (name, score)|
    weight = weights.fetch(name, 0.2)
    [acc + score * weight, total + weight]
  end

  total_weight.positive? ? (weighted_sum / total_weight).round(3) : 0.0
end
-
-
# Whether the compliance level's tolerance admits the given violations:
# :none rejects any, :high accepts all, :low rejects critical/high
# severities, :medium rejects only critical.
def allows_violations?(violations, compliance_level)
  case compliance_level[:tolerance]
  when :none
    false
  when :high
    true
  when :low
    violations.none? { |v| v[:severity] == "critical" || v[:severity] == "high" }
  when :medium
    violations.none? { |v| v[:severity] == "critical" }
  end
end
-
-
# One-line human summary banded by score. The +suggestions+ parameter is
# currently unused but kept for interface stability.
def generate_summary(score, violations, suggestions)
  pct = (score * 100).round

  return "Content is fully compliant with brand guidelines (score: #{pct}%)." if violations.empty?
  return "Content is highly compliant with minor issues (score: #{pct}%)." if score >= 0.9

  if score >= 0.7
    severity_counts = violations.group_by { |v| v[:severity] }.transform_values(&:count)
    "Content is moderately compliant. #{severity_counts.map { |s, c| "#{c} #{s}" }.join(', ')} violations found."
  elsif score >= 0.5
    "Content has compliance issues that should be addressed. #{violations.count} violations found."
  else
    "Content has significant compliance violations requiring major revisions."
  end
end
-
-
# Orders violations by severity (critical first, unknown last), then by
# type and message for a deterministic listing.
def prioritize_violations(violations)
  rank = { "critical" => 0, "high" => 1, "medium" => 2, "low" => 3 }

  violations.sort_by do |violation|
    [rank.fetch(violation[:severity], 4), violation[:type], violation[:message]]
  end
end
-
-
# Removes duplicate suggestions (same type + message, first wins) and
# floats high-priority entries to the front.
def deduplicate_suggestions(suggestions)
  unique = suggestions.uniq { |s| [s[:type], s[:message]] }
  unique.sort_by { |s| s[:priority] == "high" ? 0 : 1 }
end
-
-
# Delegates suggestion generation to the SuggestionEngine, feeding it the
# accumulated violations and per-validator analysis data.
def generate_intelligent_suggestions
  engine = Compliance::SuggestionEngine.new(
    brand,
    @results[:violations] || [],
    @results[:analysis] || {}
  )
  engine.generate_suggestions
end
-
-
# Attempts automatic fixes, applying only those the SuggestionEngine rates
# above 0.7 confidence. Fixes are applied cumulatively, in the violations'
# given order, against a copy of the content.
def auto_fix_violations(violations)
  return { content_changed: false, fixes: [] } if violations.empty?

  engine = Compliance::SuggestionEngine.new(brand, violations, @results[:analysis])
  working_content = content.dup
  applied = []

  violations.each do |violation|
    fix = engine.generate_fix(violation, working_content)
    next unless fix[:confidence] > 0.7

    working_content = fix[:fixed_content]
    applied << {
      violation_id: violation[:id],
      fix_applied: fix[:changes_made],
      confidence: fix[:confidence]
    }
  end

  {
    content_changed: applied.any?,
    fixed_content: working_content,
    fixes: applied
  }
end
-
-
# Pushes a summary of the finished check onto the brand's ActionCable
# stream. No-op when broadcasting is disabled in config.
def broadcast_results
  return unless config.broadcast_violations

  payload = {
    event: "compliance_check_complete",
    compliant: @results[:compliant],
    score: @results[:score],
    violations_count: (@results[:violations] || []).count,
    suggestions_count: (@results[:suggestions] || []).count
  }

  ActionCable.server.broadcast("brand_compliance_#{brand.id}", payload)
end
-
-
# Cache key namespaced by brand, validator class and an MD5 digest of the
# content, so any content change invalidates the cached result.
def validator_cache_key(validator)
  digest = Digest::MD5.hexdigest(content.to_s)
  ["brand_compliance", brand.id, validator.class.name.underscore, digest, content_type].join(":")
end
-
-
# Content above 10k characters is eligible for the async pipeline.
def content_large?
  content.size > 10_000
end
-
-
# True when content is a non-blank String (ActiveSupport #present?, so
# whitespace-only strings do not count).
def has_text_content?
  content.is_a?(String) && content.present?
end
-
-
# Picks the validator responsible for a given aspect; nil when the aspect
# is unrecognized.
def validator_for_aspect(aspect)
  case aspect
  when :tone, :readability, :sentiment, :brand_voice
    Compliance::NlpAnalyzer.new(brand, content, options)
  when :colors, :typography, :logo, :composition
    Compliance::VisualValidator.new(brand, content, options)
  when :rules, :guidelines
    Compliance::RuleEngine.new(brand)
  end
end
-
-
# Wraps per-aspect validator results into a summary envelope; compliant
# only when no aspect reported violations.
def compile_aspect_results(aspect_results)
  checked = aspect_results.keys
  clean = aspect_results.values.none? { |r| r[:violations]&.any? }

  {
    aspects_checked: checked,
    compliant: clean,
    results: aspect_results,
    summary: "Checked #{checked.join(', ')} aspects"
  }
end
-
-
# Logs the error and converts it into the service's error-shaped result
# hash (non-compliant, zero score, empty lists).
#
# FIX: an exception object that was never raised has a nil backtrace, so
# `error.backtrace.join` raised NoMethodError; Array() makes that path safe.
def handle_error(error)
  Rails.logger.error "Compliance check error: #{error.message}"
  Rails.logger.error Array(error.backtrace).join("\n")

  {
    compliant: false,
    error: error.message,
    error_type: error.class.name,
    violations: [],
    suggestions: [],
    score: 0.0,
    summary: "Compliance check failed due to an error"
  }
end
-
end
-
end
-
# Example usage of the enhanced Brand Compliance Validation Service
-
-
module Branding
-
class ComplianceUsageExample
-
# Walks through the main entry points of the brand compliance stack:
# sync checks, aspect checks, auto-fix, visual validation, async jobs,
# HTTP/ActionCable usage, caching and analytics. Intended to be run via
# `rails runner`; any error is printed rather than raised.
#
# FIX: check_specific_aspects returns a compiled summary hash, so the
# per-aspect results live under :results — iterating the outer hash
# yielded [:aspects_checked, ...] pairs and crashed on result[:violations].
def self.demonstrate
  # 1. Basic compliance check
  brand = Brand.first
  content = "Check out our amazing new product! It's the best solution for your needs."

  service = ComplianceServiceV2.new(brand, content, "marketing_copy")
  results = service.check_compliance

  puts "=== Basic Compliance Check ==="
  puts "Compliant: #{results[:compliant]}"
  puts "Score: #{results[:score]}"
  puts "Summary: #{results[:summary]}"
  puts "Violations: #{results[:violations].count}"
  puts "Suggestions: #{results[:suggestions].count}"
  puts

  # 2. Check specific aspects
  puts "=== Specific Aspect Validation ==="
  aspect_results = service.check_specific_aspects([:tone, :readability])
  aspect_results[:results].each do |aspect, result|
    puts "#{aspect}: #{(result[:violations] || []).count} violations"
  end
  puts

  # 3. Auto-fix violations
  puts "=== Auto-Fix Violations ==="
  fix_results = service.validate_and_fix
  if fix_results[:fixes_applied]
    puts "Original compliant: #{fix_results[:original_results][:compliant]}"
    puts "Fixes applied: #{fix_results[:fixes_applied].count}"
    puts "Final compliant: #{fix_results[:final_results][:compliant]}"
    puts "Fixed content preview: #{fix_results[:fixed_content][0..100]}..."
  end
  puts

  # 4. Visual content compliance
  puts "=== Visual Content Compliance ==="
  visual_data = {
    colors: {
      primary: ["#1E40AF", "#3B82F6"],
      secondary: ["#10B981", "#34D399"]
    },
    typography: {
      fonts: ["Inter", "Roboto"],
      legibility_score: 0.85
    },
    logo: {
      size: 150,
      placement: "top-left",
      clear_space_ratio: 0.6
    },
    quality: {
      resolution: 72,
      file_size: 250_000,
      dimensions: { width: 1200, height: 600 }
    }
  }

  visual_service = ComplianceServiceV2.new(
    brand,
    "Visual content description",
    "image",
    { visual_data: visual_data }
  )
  visual_results = visual_service.check_compliance
  puts "Visual compliance score: #{visual_results[:score]}"
  puts

  # 5. Async processing for large content
  puts "=== Async Processing ==="
  large_content = "Large content " * 1000 # Simulating large content

  job = BrandComplianceJob.perform_later(
    brand.id,
    large_content,
    "article",
    {
      user_id: brand.user_id,
      broadcast_events: true,
      store_results: true
    }
  )
  puts "Job queued with ID: #{job.job_id}"
  puts

  # 6. Using the API endpoint
  puts "=== API Usage Example ==="
  puts <<~CURL
    # Check compliance via API
    curl -X POST http://localhost:3000/api/v1/brands/#{brand.id}/compliance/check \\
      -H "Content-Type: application/json" \\
      -H "Authorization: Bearer YOUR_TOKEN" \\
      -d '{
        "content": "Your content here",
        "content_type": "social_media",
        "compliance_level": "strict",
        "channel": "twitter",
        "audience": "b2b_professionals"
      }'

    # Validate specific aspect
    curl -X POST http://localhost:3000/api/v1/brands/#{brand.id}/compliance/validate_aspect \\
      -H "Content-Type: application/json" \\
      -H "Authorization: Bearer YOUR_TOKEN" \\
      -d '{
        "aspect": "tone",
        "content": "Your content here"
      }'

    # Preview fix for violation
    curl -X POST http://localhost:3000/api/v1/brands/#{brand.id}/compliance/preview_fix \\
      -H "Content-Type: application/json" \\
      -H "Authorization: Bearer YOUR_TOKEN" \\
      -d '{
        "violation": {
          "id": "tone_1",
          "type": "tone_mismatch",
          "severity": "medium",
          "details": {
            "expected": "professional",
            "detected": "casual"
          }
        },
        "content": "Your content here"
      }'
  CURL

  # 7. Real-time updates via ActionCable
  puts "\n=== ActionCable Subscription Example ==="
  puts <<~JS
    // JavaScript client code
    const cable = ActionCable.createConsumer('ws://localhost:3000/cable');

    const complianceChannel = cable.subscriptions.create(
      {
        channel: 'BrandComplianceChannel',
        brand_id: #{brand.id},
        session_id: 'unique-session-id'
      },
      {
        connected() {
          console.log('Connected to compliance channel');

          // Request compliance check
          this.perform('check_compliance', {
            content: 'Content to check',
            content_type: 'email',
            async: true
          });
        },

        received(data) {
          switch(data.event) {
            case 'validation_started':
              console.log('Validation started:', data);
              break;
            case 'violation_detected':
              console.log('Violation found:', data.violation);
              break;
            case 'validation_complete':
              console.log('Validation complete:', data);
              break;
          }
        }
      }
    );
  JS

  # 8. Caching and performance
  puts "\n=== Cache Management ==="
  cache_stats = Branding::Compliance::CacheService.cache_statistics(brand.id)
  puts "Cache statistics: #{cache_stats}"

  # Warm cache for better performance
  Branding::Compliance::CacheWarmerJob.perform_later(brand.id)
  puts "Cache warming job queued"

  # 9. Compliance history and analytics
  puts "\n=== Compliance Analytics ==="
  recent_results = brand.compliance_results.recent.limit(10)
  puts "Recent checks: #{recent_results.count}"
  puts "Average score: #{brand.compliance_results.average_score}"
  puts "Compliance rate: #{brand.compliance_results.compliance_rate}%"
  puts "Common violations: #{brand.compliance_results.common_violations(3)}"
rescue => e
  puts "Error: #{e.message}"
  puts e.backtrace.first(5)
end
-
-
# Advanced configuration example
-
# Shows how to tune global service configuration and register a custom
# validator.
#
# FIX: the original declared `class CustomIndustryValidator < ...` inside
# this method body, which is a SyntaxError in Ruby ("class definition in
# method body"). The class is now built with Class.new instead.
def self.configure_compliance_service
  # Configure global settings
  Branding::ComplianceServiceV2.configure do |config|
    config.cache_store = Rails.cache
    config.broadcast_violations = true
    config.async_processing = true
    config.max_processing_time = 60.seconds
  end

  # Custom validator example: industry-specific checks for healthcare brands.
  custom_industry_validator = Class.new(Branding::Compliance::BaseValidator) do
    def validate
      # Custom industry-specific validation logic
      if brand.industry == "healthcare" && content.match?(/medical claim/i)
        add_violation(
          type: "unverified_medical_claim",
          severity: "high",
          message: "Medical claims must be verified and include disclaimers"
        )
      end

      { violations: @violations, suggestions: @suggestions }
    end
  end

  # Use with custom validator
  brand = Brand.first
  service = Branding::ComplianceServiceV2.new(
    brand,
    "Content with medical claims",
    "article",
    { custom_validators: [custom_industry_validator.new(brand, "content")] }
  )
end
-
end
-
end
-
-
# To run the demonstration:
-
# rails runner "Branding::ComplianceUsageExample.demonstrate"
-
class CampaignAnalyticsService
-
# @param campaign [Campaign] the campaign whose analytics are reported
def initialize(campaign)
  @campaign = campaign
end
-
-
# Assembles the full analytics report for the last +days+ days.
# @param period [String] granularity label passed through to breakdowns
# @param days [Integer] size of the reporting window
def generate_comprehensive_report(period = 'daily', days = 30)
  window_start = days.days.ago
  window_end = Time.current

  {
    campaign_overview: campaign_overview,
    performance_summary: performance_summary(window_start, window_end),
    journey_performance: journey_performance_breakdown(period, days),
    conversion_analysis: conversion_analysis(window_start, window_end),
    persona_insights: persona_insights,
    ab_test_results: ab_test_results,
    recommendations: generate_recommendations,
    period_info: {
      start_date: window_start,
      end_date: window_end,
      period: period,
      days: days
    }
  }
end
-
-
# Snapshot of campaign identity and progress; all values delegate to the
# campaign record.
def campaign_overview
  {
    id: @campaign.id,
    name: @campaign.name,
    status: @campaign.status,
    type: @campaign.campaign_type,
    # NOTE(review): raises NoMethodError if the campaign has no persona —
    # confirm persona is guaranteed at this call site.
    persona: @campaign.persona.name,
    duration_days: @campaign.duration_days,
    total_journeys: @campaign.total_journeys,
    active_journeys: @campaign.active_journeys,
    progress_percentage: @campaign.progress_percentage
  }
end
-
-
# Aggregates JourneyAnalytics rows for this campaign within the window.
# Falls back to the campaign's own performance_summary when no analytics
# rows exist for the period.
#
# FIX: removed the unused local `journeys` that was computed (issuing no-op
# relation work) but never read.
def performance_summary(start_date, end_date)
  total_performance = @campaign.performance_summary

  # Aggregate journey analytics for the reporting window
  analytics = JourneyAnalytics.joins(:journey)
                              .where(journeys: { campaign_id: @campaign.id })
                              .where(period_start: start_date..end_date)

  return total_performance if analytics.empty?

  {
    total_executions: analytics.sum(:total_executions),
    completed_executions: analytics.sum(:completed_executions),
    abandoned_executions: analytics.sum(:abandoned_executions),
    overall_conversion_rate: analytics.average(:conversion_rate)&.round(2) || 0,
    overall_engagement_score: analytics.average(:engagement_score)&.round(2) || 0,
    average_completion_time: analytics.average(:average_completion_time)&.round(2) || 0,
    trends: calculate_performance_trends(analytics)
  }
end
-
-
# One performance snapshot per published journey. The +period+ parameter is
# currently unused but kept for interface stability.
def journey_performance_breakdown(period = 'daily', days = 30)
  @campaign.journeys.published.includes(:journey_analytics).map do |journey|
    {
      journey_id: journey.id,
      journey_name: journey.name,
      status: journey.status,
      performance_score: journey.latest_performance_score,
      analytics: journey.analytics_summary(days),
      funnel_data: journey.funnel_performance('default', days),
      ab_test_status: journey.ab_test_status
    }
  end
end
-
-
# Summarizes conversion funnel activity within the window. The grouped sum
# yields a hash keyed by [funnel_name, stage] pairs mapping to conversion
# counts, which is then re-grouped per stage.
def conversion_analysis(start_date, end_date)
  funnels = ConversionFunnel.joins(:journey)
                            .where(journeys: { campaign_id: @campaign.id })
                            .where(period_start: start_date..end_date)
                            .group(:funnel_name, :stage)
                            .sum(:conversions)

  # key is [funnel_name, stage]; key[1] is the stage
  stage_performance = funnels.group_by { |key, _| key[1] } # Group by stage
                             .transform_values { |stage_data| stage_data.sum { |_, conversions| conversions } }

  {
    total_conversions: funnels.values.sum,
    conversions_by_stage: stage_performance,
    funnel_efficiency: calculate_funnel_efficiency(funnels),
    bottlenecks: identify_conversion_bottlenecks(stage_performance)
  }
end
-
-
# Persona profile plus alignment/segment analysis; {} when the campaign has
# no persona.
def persona_insights
  profile = @campaign.persona
  return {} unless profile

  {
    persona_name: profile.name,
    demographics_summary: profile.demographics_summary,
    behavior_summary: profile.behavior_summary,
    campaign_alignment: analyze_campaign_persona_alignment,
    performance_by_segment: calculate_segment_performance
  }
end
-
-
# One summary hash per A/B test on the campaign; [] when there are none.
def ab_test_results
  tests = @campaign.ab_tests.includes(:ab_test_variants)

  return [] if tests.empty?

  tests.map do |test|
    {
      test_name: test.name,
      status: test.status,
      duration_days: test.duration_days,
      statistical_significance: test.statistical_significance_reached?,
      # winner_variant may be nil while the test is running
      winner: test.winner_variant&.name,
      results_summary: test.results_summary,
      variant_comparison: test.variant_comparison,
      recommendation: test.recommend_action
    }
  end
end
-
-
# Builds a prioritized list of recommendation hashes from current
# performance, per-journey scores and completed A/B tests. The 5%
# conversion, 60-point engagement and 50-point journey-score cutoffs are
# hard-coded heuristics.
#
# NOTE(review): when performance_summary falls back to the campaign's own
# summary hash, the keys compared below may be absent (nil < Float raises) —
# confirm the fallback shape matches.
def generate_recommendations
  recommendations = []

  # Performance-based recommendations
  performance = performance_summary(30.days.ago, Time.current)

  if performance[:overall_conversion_rate] < 5.0
    recommendations << {
      type: 'conversion_optimization',
      priority: 'high',
      title: 'Low Conversion Rate Detected',
      description: "Campaign conversion rate (#{performance[:overall_conversion_rate]}%) is below industry average (5%). Consider optimizing journey steps or messaging.",
      action_items: [
        'Review journey flow for friction points',
        'A/B test call-to-action messages',
        'Analyze drop-off points in conversion funnel'
      ]
    }
  end

  if performance[:overall_engagement_score] < 60.0
    recommendations << {
      type: 'engagement_improvement',
      priority: 'medium',
      title: 'Engagement Score Below Target',
      description: "Engagement score (#{performance[:overall_engagement_score]}) suggests users are not fully interacting with journey content.",
      action_items: [
        'Review content relevance to persona',
        'Optimize content for mobile devices',
        'Add interactive elements to journey steps'
      ]
    }
  end

  # Journey-specific recommendations
  journey_performances = journey_performance_breakdown

  low_performing_journeys = journey_performances.select { |j| j[:performance_score] < 50.0 }
  if low_performing_journeys.any?
    recommendations << {
      type: 'journey_optimization',
      priority: 'high',
      title: 'Underperforming Journeys Identified',
      description: "#{low_performing_journeys.count} journey(s) have performance scores below 50%.",
      action_items: [
        'Review underperforming journey content',
        'Consider A/B testing alternative approaches',
        'Analyze persona-journey alignment'
      ],
      affected_journeys: low_performing_journeys.map { |j| j[:journey_name] }
    }
  end

  # A/B test recommendations (only completed tests with a declared winner)
  ab_results = ab_test_results

  completed_tests = ab_results.select { |test| test[:status] == 'completed' }
  if completed_tests.any? { |test| test[:winner] }
    winners = completed_tests.select { |test| test[:winner] }.map { |test| test[:winner] }
    recommendations << {
      type: 'ab_test_implementation',
      priority: 'high',
      title: 'Implement A/B Test Winners',
      description: "#{winners.count} A/B test(s) have identified winning variants ready for implementation.",
      action_items: [
        'Deploy winning variants to all traffic',
        'Monitor performance after implementation',
        'Plan next round of optimization tests'
      ],
      winning_variants: winners
    }
  end

  recommendations
end
-
-
# Estimates campaign ROI from completed executions over the last 30 days.
# Returns {} when no investment figure is supplied. Revenue is a
# placeholder model (target_metrics value or 100 per conversion) until real
# revenue tracking is integrated.
def calculate_roi(investment_amount = nil)
  return {} unless investment_amount

  summary = performance_summary(30.days.ago, Time.current)
  conversions = summary[:completed_executions] || 0

  per_conversion = @campaign.target_metrics['revenue_per_conversion'] || 100
  revenue = conversions * per_conversion

  roi_pct = investment_amount > 0 ? ((revenue - investment_amount) / investment_amount * 100) : 0

  {
    investment: investment_amount,
    estimated_revenue: revenue,
    net_profit: revenue - investment_amount,
    roi_percentage: roi_pct.round(1),
    cost_per_conversion: conversions > 0 ? (investment_amount / conversions).round(2) : 0,
    conversion_value: per_conversion
  }
end
-
-
# Serializes the comprehensive report: 'csv' → CSV stub, 'json' → JSON
# string, anything else → the raw hash.
def export_data(format = 'json')
  report = generate_comprehensive_report

  case format
  when 'csv' then export_to_csv(report)
  when 'json' then report.to_json
  else report
  end
end
-
-
private
-
-
# Compares this week's analytics rows against last week's and returns
# per-metric trend hashes (see #calculate_trend_change). Returns {} when
# there are fewer than two rows or either week has no data.
def calculate_performance_trends(analytics)
  return {} if analytics.count < 2

  # Calculate week-over-week trends
  this_week = analytics.where('period_start >= ?', 1.week.ago)
  last_week = analytics.where('period_start >= ? AND period_start < ?', 2.weeks.ago, 1.week.ago)

  return {} if this_week.empty? || last_week.empty?

  {
    conversion_rate: calculate_trend_change(
      last_week.average(:conversion_rate),
      this_week.average(:conversion_rate)
    ),
    engagement_score: calculate_trend_change(
      last_week.average(:engagement_score),
      this_week.average(:engagement_score)
    ),
    total_executions: calculate_trend_change(
      last_week.sum(:total_executions),
      this_week.sum(:total_executions)
    )
  }
end
-
-
# Builds a {previous, current, change %, trend} hash; returns 0 when either
# value is missing or the baseline is zero (avoids division by zero).
# Trend is 'up'/'down' only beyond a ±5% band, 'stable' otherwise.
def calculate_trend_change(old_value, new_value)
  return 0 if old_value.nil? || new_value.nil? || old_value == 0

  pct = ((new_value - old_value) / old_value * 100).round(1)
  direction =
    if pct > 5 then 'up'
    elsif pct < -5 then 'down'
    else 'stable'
    end

  {
    previous_value: old_value.round(2),
    current_value: new_value.round(2),
    change_percentage: pct,
    trend: direction
  }
end
-
-
# Stage-to-stage conversion percentages walking Journey::STAGES in order.
# +funnels+ is the grouped-sum hash { [funnel_name, stage] => conversions }.
def calculate_funnel_efficiency(funnels)
  return {} if funnels.empty?

  stage_totals = funnels.group_by { |key, _| key[1] }
                        .transform_values { |rows| rows.sum { |_, conversions| conversions } }

  Journey::STAGES.each_cons(2).each_with_object({}) do |(from, to), efficiencies|
    from_total = stage_totals[from] || 0
    to_total = stage_totals[to] || 0

    efficiencies["#{from}_to_#{to}"] =
      from_total > 0 ? (to_total.to_f / from_total * 100).round(1) : 0
  end
end
-
-
# Flags the two lowest-converting stages; severity is "high" when a stage
# sits below half the mean stage volume (mean uses integer division,
# matching the original arithmetic).
def identify_conversion_bottlenecks(stage_performance)
  return [] if stage_performance.empty?

  mean = stage_performance.values.sum / stage_performance.count

  stage_performance.min_by(2) { |_, conversions| conversions }.map do |stage, conversions|
    {
      stage: stage,
      conversions: conversions,
      severity: conversions < mean * 0.5 ? 'high' : 'medium'
    }
  end
end
-
-
# Combines channel and messaging alignment scores into an overall view
# with concrete suggestions.
def analyze_campaign_persona_alignment
  persona = @campaign.persona
  journeys = @campaign.journeys

  channel_score = analyze_channel_alignment(persona, journeys)
  messaging_score = analyze_messaging_alignment(persona, journeys)

  {
    overall_score: (channel_score + messaging_score) / 2,
    channel_alignment: channel_score,
    messaging_alignment: messaging_score,
    suggestions: generate_alignment_suggestions(channel_score, messaging_score)
  }
end
-
-
def analyze_channel_alignment(persona, journeys)
-
preferred_channels = persona.preferences['channel_preferences'] || []
-
return 70 if preferred_channels.empty? # Default score if no preferences
-
-
used_channels = journeys.flat_map { |j| j.journey_steps.pluck(:channel) }.compact.uniq
-
-
matching_channels = (preferred_channels & used_channels).count
-
total_preferred = preferred_channels.count
-
-
total_preferred > 0 ? (matching_channels.to_f / total_preferred * 100).round : 70
-
end
-
-
def analyze_messaging_alignment(persona, journeys)
-
preferred_tone = persona.preferences['messaging_tone']
-
return 70 unless preferred_tone # Default score if no preference
-
-
# This would analyze actual journey content for tone
-
# For now, return a placeholder score
-
75
-
end
-
-
# Textual suggestions based on the two alignment scores: flags weak channel
# or messaging alignment (<60) and praises strong alignment (both >80).
def generate_alignment_suggestions(channel_score, messaging_score)
  tips = []

  tips << "Consider incorporating more preferred channels from persona profile" if channel_score < 60
  tips << "Review messaging tone to better match persona preferences" if messaging_score < 60
  tips << "Strong persona alignment - maintain current approach" if channel_score > 80 && messaging_score > 80

  tips
end
-
-
# Placeholder demographic breakdown: real segmentation is not wired up yet,
# so these figures are static sample data, not derived from the campaign.
def calculate_segment_performance
  {
    age_segments: {
      '18-25' => { conversion_rate: 4.2, engagement_score: 78 },
      '26-35' => { conversion_rate: 6.1, engagement_score: 82 },
      '36-45' => { conversion_rate: 5.8, engagement_score: 75 }
    },
    location_segments: {
      'urban' => { conversion_rate: 5.9, engagement_score: 80 },
      'suburban' => { conversion_rate: 5.2, engagement_score: 76 },
      'rural' => { conversion_rate: 4.8, engagement_score: 72 }
    }
  }
end
-
-
# Stub: real CSV serialization is not implemented yet; the +data+ argument
# is accepted for interface stability but ignored.
def export_to_csv(data)
  "CSV export functionality would be implemented here"
end
end
-
end
-
module Journey
  # Validates journey (and journey-step) content against the journey's brand
  # by delegating to Branding::ComplianceServiceV2, then layering on
  # journey-specific metadata, persisted insights and ActionCable broadcasts.
  class BrandComplianceService
    include ActiveSupport::Configurable

    # Service-wide defaults, overridable via ActiveSupport::Configurable.
    config_accessor :default_compliance_level, default: :standard
    config_accessor :cache_results, default: true
    config_accessor :async_processing, default: false
    config_accessor :broadcast_violations, default: true

    attr_reader :journey, :step, :brand, :content, :content_type, :context, :results

    # Maps journey-step content types onto the compliance engine's
    # content-type vocabulary.
    JOURNEY_CONTENT_TYPES = {
      'email' => 'email_content',
      'blog_post' => 'blog_content',
      'social_post' => 'social_media_content',
      'landing_page' => 'web_content',
      'video' => 'video_script',
      'webinar' => 'presentation_content',
      'advertisement' => 'advertising_content',
      'newsletter' => 'email_content'
    }.freeze

    # @param journey [Journey] owning journey (required)
    # @param step [JourneyStep, nil] optional step the content belongs to
    # @param content [String] content to validate (required)
    # @param context [Hash] extra context (e.g. :content_type, :channel)
    # @raise [ArgumentError] if journey or content is blank
    def initialize(journey:, step: nil, content:, context: {})
      @journey = journey
      @step = step
      @brand = journey.brand
      @content = content
      @context = context.with_indifferent_access
      @content_type = determine_content_type
      @results = {}

      validate_initialization
    end

    # Main method to check compliance for journey content.
    # Returns the enriched results hash; on failure returns an error-shaped
    # hash (see #handle_compliance_error) instead of raising.
    def check_compliance(options = {})
      return no_brand_compliance_result unless brand.present?

      compliance_options = build_compliance_options(options)

      # Create compliance service instance
      compliance_service = Branding::ComplianceServiceV2.new(
        brand,
        content,
        @content_type,
        compliance_options
      )

      # Perform compliance check
      @results = compliance_service.check_compliance

      # Add journey-specific metadata
      enhance_results_with_journey_context

      # Store compliance insights unless the caller explicitly opted out
      store_compliance_insights if options[:store_insights] != false

      # Broadcast real-time updates
      broadcast_compliance_results if config.broadcast_violations

      @results
    rescue StandardError => e
      handle_compliance_error(e)
    end

    # Pre-generation compliance check for suggested content.
    # Cheaper pass (no suggestions, no caching) used to gate content before
    # it is generated/saved. Returns { allowed:, score:, violations:, ... }.
    def pre_generation_check(suggested_content, options = {})
      return { allowed: true, suggestions: [] } unless brand.present?

      # Quick compliance check for content suggestions
      compliance_options = build_compliance_options(options.merge(
        generate_suggestions: false,
        cache_results: false
      ))

      compliance_service = Branding::ComplianceServiceV2.new(
        brand,
        suggested_content,
        @content_type,
        compliance_options
      )

      results = compliance_service.check_compliance

      {
        allowed: results[:compliant],
        score: results[:score],
        violations: results[:violations] || [],
        suggestions: results[:suggestions] || [],
        quick_check: true
      }
    end

    # Validate content against specific brand aspects only
    # (e.g. tone, colors), instead of a full check.
    def validate_aspects(aspects, options = {})
      return no_brand_compliance_result unless brand.present?

      compliance_options = build_compliance_options(options)

      compliance_service = Branding::ComplianceServiceV2.new(
        brand,
        content,
        @content_type,
        compliance_options
      )

      @results = compliance_service.check_specific_aspects(aspects)
      enhance_results_with_journey_context

      @results
    end

    # Auto-fix compliance violations. On success the service's @content is
    # replaced with the fixed content so subsequent checks use it.
    def auto_fix_violations(options = {})
      return no_brand_compliance_result unless brand.present?

      compliance_options = build_compliance_options(options)

      compliance_service = Branding::ComplianceServiceV2.new(
        brand,
        content,
        @content_type,
        compliance_options
      )

      fix_results = compliance_service.validate_and_fix

      if fix_results[:fixed_content].present?
        @content = fix_results[:fixed_content]
      end

      @results = fix_results
      enhance_results_with_journey_context

      @results
    end

    # Get compliance recommendations for improving the content.
    # Runs a full check first, then previews fixes for the violations found.
    def get_recommendations(options = {})
      return { recommendations: [] } unless brand.present?

      # First check current compliance
      compliance_results = check_compliance(options)

      # Get intelligent suggestions for improvements
      compliance_service = Branding::ComplianceServiceV2.new(
        brand,
        content,
        @content_type,
        build_compliance_options(options)
      )

      recommendations = compliance_service.preview_fixes(compliance_results[:violations])

      {
        current_score: compliance_results[:score],
        recommendations: recommendations,
        priority_fixes: filter_priority_recommendations(recommendations),
        estimated_improvement: calculate_estimated_improvement(recommendations)
      }
    end

    # Check if content meets minimum compliance threshold.
    # Threshold defaults to the configured compliance level's cutoff.
    def meets_minimum_compliance?(threshold = nil)
      results = check_compliance
      threshold ||= compliance_threshold_for_level(config.default_compliance_level)

      results[:score] >= threshold && results[:compliant]
    end

    # Get compliance score (0.0-1.0) without the full enrichment pipeline.
    # Returns 1.0 when there is no brand to check against.
    def quick_score
      return 1.0 unless brand.present?

      compliance_service = Branding::ComplianceServiceV2.new(
        brand,
        content,
        @content_type,
        { generate_suggestions: false, cache_results: true }
      )

      results = compliance_service.check_compliance
      results[:score] || 0.0
    end

    # Get brand-specific validation rules applicable to this content type,
    # plus universal rules, highest priority first.
    def applicable_brand_rules
      return [] unless brand.present?

      brand.brand_guidelines
           .active
           .where(category: content_category_mapping)
           .or(brand.brand_guidelines.active.where(rule_type: 'universal'))
           .order(priority: :desc)
    end

    # Check if specific messaging text is allowed: no banned words and
    # tone-compliant per the brand's messaging framework. Permissive (true)
    # when no framework exists.
    def messaging_allowed?(message_text)
      return true unless brand&.messaging_framework.present?

      framework = brand.messaging_framework

      # Check for banned words (case-insensitive substring match)
      banned_words = framework.banned_words || []
      contains_banned = banned_words.any? { |word| message_text.downcase.include?(word.downcase) }

      # Check tone compliance
      tone_compliant = check_message_tone_compliance(message_text, framework.tone_attributes || {})

      !contains_banned && tone_compliant
    end

    private

    # Guards required constructor arguments.
    def validate_initialization
      raise ArgumentError, "Journey is required" unless journey.present?
      raise ArgumentError, "Content is required" unless content.present?
    end

    # Resolves the compliance content type: step's mapped type, then the
    # step's raw type, then context, then 'general'.
    def determine_content_type
      if step.present?
        JOURNEY_CONTENT_TYPES[step.content_type] || step.content_type || 'general'
      else
        context[:content_type] || 'general'
      end
    end

    # Base options for the compliance engine; caller-supplied options win.
    def build_compliance_options(options = {})
      base_options = {
        compliance_level: config.default_compliance_level,
        async: config.async_processing,
        generate_suggestions: true,
        real_time_updates: config.broadcast_violations,
        cache_results: config.cache_results,
        channel: step&.channel || context[:channel],
        audience: journey.target_audience,
        campaign_context: build_campaign_context
      }

      base_options.merge(options)
    end

    # Journey/campaign metadata passed through to the compliance engine.
    def build_campaign_context
      {
        journey_id: journey.id,
        journey_name: journey.name,
        campaign_type: journey.campaign_type,
        journey_stage: step&.stage,
        step_position: step&.position,
        target_audience: journey.target_audience,
        goals: journey.goals
      }
    end

    # Annotates @results with journey context, step recommendations and the
    # recent compliance trend. No-op when @results is not a Hash.
    def enhance_results_with_journey_context
      return unless @results.is_a?(Hash)

      @results[:journey_context] = {
        journey_id: journey.id,
        journey_name: journey.name,
        step_id: step&.id,
        step_name: step&.name,
        content_type: @content_type,
        checked_at: Time.current
      }

      # Add step-specific recommendations
      if step.present?
        @results[:step_recommendations] = generate_step_specific_recommendations
      end

      # Add journey-level compliance trends
      @results[:compliance_trend] = calculate_journey_compliance_trend
    end

    # Combines stage- and channel-specific recommendations for the step.
    def generate_step_specific_recommendations
      recommendations = []

      # Recommend content types that perform better for this stage
      if step.stage.present?
        stage_recommendations = get_stage_specific_recommendations(step.stage)
        recommendations.concat(stage_recommendations)
      end

      # Recommend channels with better brand compliance
      if step.channel.present?
        channel_recommendations = get_channel_specific_recommendations(step.channel)
        recommendations.concat(channel_recommendations)
      end

      recommendations.uniq
    end

    # Canned recommendations per funnel stage; empty for unknown stages.
    def get_stage_specific_recommendations(stage)
      case stage
      when 'awareness'
        [
          'Focus on brand storytelling and value proposition',
          'Use approved brand messaging for first impressions',
          'Ensure visual consistency with brand guidelines'
        ]
      when 'consideration'
        [
          'Highlight key differentiators from messaging framework',
          'Use case studies that align with brand voice',
          'Maintain consistent tone across comparison content'
        ]
      when 'conversion'
        [
          'Use approved call-to-action phrases',
          'Ensure urgency messaging aligns with brand tone',
          'Maintain brand voice in promotional content'
        ]
      when 'retention'
        [
          'Use consistent brand voice in ongoing communications',
          'Apply brand guidelines to support content',
          'Maintain visual brand consistency'
        ]
      when 'advocacy'
        [
          'Encourage brand-aligned testimonials',
          'Use consistent brand messaging in referral content',
          'Ensure social sharing aligns with brand guidelines'
        ]
      else
        []
      end
    end

    # Canned recommendations per delivery channel; empty for unknown channels.
    def get_channel_specific_recommendations(channel)
      case channel
      when 'email'
        ['Ensure email templates follow brand visual guidelines', 'Use approved email signature and branding']
      when 'social_media', 'facebook', 'instagram', 'twitter', 'linkedin'
        ['Use brand-approved hashtags', 'Maintain consistent visual style', 'Follow social media brand guidelines']
      when 'website'
        ['Ensure web content follows brand typography', 'Use approved color schemes', 'Follow brand content guidelines']
      else
        []
      end
    end

    # Summarizes the last week's stored compliance scores for this journey;
    # nil when there is no usable history.
    def calculate_journey_compliance_trend
      return nil unless journey.journey_steps.any?

      # Get recent compliance scores for this journey
      recent_insights = journey.journey_insights
                               .where(insights_type: 'brand_compliance')
                               .where('calculated_at >= ?', 7.days.ago)
                               .order(calculated_at: :desc)
                               .limit(10)

      return nil if recent_insights.empty?

      scores = recent_insights.map { |insight| insight.data['score'] }.compact
      return nil if scores.empty?

      {
        average_score: scores.sum.to_f / scores.length,
        trend: calculate_trend(scores),
        total_checks: scores.length,
        latest_score: scores.first
      }
    end

    # Classifies a score series (most-recent first) as improving / declining
    # / stable by comparing the newest-3 average against the oldest-3.
    def calculate_trend(scores)
      return 'stable' if scores.length < 2

      recent_avg = scores.first(3).sum.to_f / [scores.first(3).length, 1].max
      older_avg = scores.last(3).sum.to_f / [scores.last(3).length, 1].max

      diff = recent_avg - older_avg

      if diff > 0.05
        'improving'
      elsif diff < -0.05
        'declining'
      else
        'stable'
      end
    end

    # Persists a compliance snapshot as a journey insight (expires in 7
    # days). Failures are logged and swallowed by design — insight storage
    # must never break the compliance flow.
    def store_compliance_insights
      return unless journey.present?

      insight_data = {
        score: @results[:score],
        compliant: @results[:compliant],
        violations_count: (@results[:violations] || []).length,
        suggestions_count: (@results[:suggestions] || []).length,
        content_type: @content_type,
        step_id: step&.id,
        brand_id: brand&.id,
        detailed_results: @results.except(:journey_context)
      }

      journey.journey_insights.create!(
        insights_type: 'brand_compliance',
        data: insight_data,
        calculated_at: Time.current,
        expires_at: 7.days.from_now,
        metadata: {
          brand_name: brand&.name,
          content_length: content.length,
          step_name: step&.name
        }
      )
    rescue => e
      Rails.logger.error "Failed to store compliance insights: #{e.message}"
    end

    # Pushes a summary of the check over ActionCable to the journey's
    # compliance stream. Failures are logged and swallowed by design.
    def broadcast_compliance_results
      return unless journey.present? && brand.present?

      ActionCable.server.broadcast(
        "journey_compliance_#{journey.id}",
        {
          event: 'compliance_check_complete',
          journey_id: journey.id,
          step_id: step&.id,
          brand_id: brand.id,
          compliant: @results[:compliant],
          score: @results[:score],
          violations_count: (@results[:violations] || []).length,
          timestamp: Time.current
        }
      )
    rescue => e
      Rails.logger.error "Failed to broadcast compliance results: #{e.message}"
    end

    # Result shape used when the journey has no brand: trivially compliant.
    def no_brand_compliance_result
      {
        compliant: true,
        score: 1.0,
        summary: "No brand guidelines to check against",
        violations: [],
        suggestions: [],
        journey_context: {
          journey_id: journey.id,
          no_brand: true
        }
      }
    end

    # Converts an exception into a non-compliant, error-shaped result hash
    # so callers always receive the same structure.
    def handle_compliance_error(error)
      Rails.logger.error "Journey compliance check failed: #{error.message}"
      # Exception#backtrace can be nil (e.g. an exception object that was
      # never raised); guard so the error handler cannot itself crash.
      Rails.logger.error error.backtrace.join("\n") if error.backtrace

      {
        compliant: false,
        error: error.message,
        error_type: error.class.name,
        score: 0.0,
        violations: [],
        suggestions: [],
        summary: "Compliance check failed due to an error",
        journey_context: {
          journey_id: journey.id,
          error_occurred: true
        }
      }
    end

    # High-confidence, high-impact fixes only. Missing :confidence is
    # treated as 0 rather than raising on nil comparison.
    def filter_priority_recommendations(recommendations)
      return [] unless recommendations.is_a?(Hash)

      recommendations.select do |_, recommendation|
        recommendation[:confidence].to_f > 0.7 && recommendation[:impact] == 'high'
      end
    end

    # Rough score-improvement estimate from recommendation confidence bands.
    # Missing :confidence is treated as 0 (see filter_priority_recommendations).
    def calculate_estimated_improvement(recommendations)
      return 0.0 unless recommendations.is_a?(Hash)

      # Estimate improvement based on number and confidence of recommendations
      high_impact_fixes = recommendations.count { |_, rec| rec[:confidence].to_f > 0.8 }
      medium_impact_fixes = recommendations.count { |_, rec| rec[:confidence].to_f > 0.5 && rec[:confidence].to_f <= 0.8 }

      # Rough improvement estimation
      (high_impact_fixes * 0.15) + (medium_impact_fixes * 0.08)
    end

    # Minimum passing score (0.0-1.0) per compliance level; unknown levels
    # fall back to the :standard cutoff.
    def compliance_threshold_for_level(level)
      case level.to_sym
      when :strict then 0.95
      when :standard then 0.85
      when :flexible then 0.70
      when :advisory then 0.50
      else 0.85
      end
    end

    # Maps the resolved content type to a brand-guideline category.
    def content_category_mapping
      case @content_type
      when 'email_content', 'newsletter'
        'messaging'
      when 'social_media_content', 'social_post'
        'social_media'
      when 'web_content', 'landing_page'
        'website'
      when 'advertising_content'
        'advertising'
      when 'video_script'
        'multimedia'
      else
        []
      end if false # (placeholder guard removed below)
    end

    # NOTE: the guard trick above is not used; real implementation follows.
    def check_message_tone_compliance(message_text, tone_attributes)
      return true if tone_attributes.empty?

      content = message_text.downcase

      # Check formality level via simple marker-substring heuristics
      if tone_attributes['formality'] == 'formal'
        informal_patterns = ['hey', 'yeah', 'cool', 'awesome', 'gonna', 'wanna', '!', 'lol', 'omg']
        return false if informal_patterns.any? { |pattern| content.include?(pattern) }
      elsif tone_attributes['formality'] == 'casual'
        formal_patterns = ['utilize', 'facilitate', 'endeavor', 'subsequently', 'henceforth']
        return false if formal_patterns.any? { |pattern| content.include?(pattern) }
      end

      # Check style requirements
      if tone_attributes['style'] == 'professional'
        unprofessional_patterns = ['slang', 'yo', 'dude', 'bro', 'sick', 'lit']
        return false if unprofessional_patterns.any? { |pattern| content.include?(pattern) }
      end

      true
    end
  end
end
-
module Journey
-
class BrandIntegrationService
-
include ActiveSupport::Configurable
-
-
config_accessor :enable_real_time_validation, default: true
-
config_accessor :enable_auto_suggestions, default: true
-
config_accessor :compliance_check_threshold, default: 0.7
-
config_accessor :auto_fix_enabled, default: false
-
-
attr_reader :journey, :user, :integration_context
-
-
# @param journey [Journey] journey whose brand integration is orchestrated
# @param user [User, nil] acting user; defaults to the journey's owner
# @param context [Hash] extra integration context (indifferent access)
def initialize(journey:, user: nil, context: {})
  @journey = journey
  @user = user || journey.user
  @integration_context = context.with_indifferent_access
  @results = {}
end
-
-
# Main orchestration method for brand-aware journey operations
-
# Dispatches a brand-aware journey operation to its orchestration method.
# Supported operations: :generate_suggestions, :validate_content,
# :auto_enhance_compliance, :analyze_brand_performance, :sync_brand_updates.
# @raise [ArgumentError] for any other operation
def orchestrate_brand_journey_flow(operation:, **options)
  handler = {
    generate_suggestions: :orchestrate_brand_aware_suggestions,
    validate_content: :orchestrate_content_validation,
    auto_enhance_compliance: :orchestrate_compliance_enhancement,
    analyze_brand_performance: :orchestrate_brand_performance_analysis,
    sync_brand_updates: :orchestrate_brand_sync
  }[operation.to_sym]

  raise ArgumentError, "Unknown operation: #{operation}" unless handler

  send(handler, options)
end
-
-
# Generate brand-aware journey suggestions
-
# Generate brand-aware journey suggestions: raw suggestions from the
# suggestion engine are filtered for compliance, enhanced with brand
# insights, persisted, and returned with before/after counts.
# Errors are converted to a result via handle_integration_error.
def orchestrate_brand_aware_suggestions(options = {})
  return no_brand_suggestions_result unless journey.brand.present?

  # Initialize suggestion engine with brand context
  suggestion_engine = JourneySuggestionEngine.new(
    journey: journey,
    user: user,
    current_step: options[:current_step],
    provider: options[:provider] || :openai
  )

  # Generate suggestions with brand filtering
  raw_suggestions = suggestion_engine.generate_suggestions(options[:filters] || {})

  # Apply additional brand compliance filtering
  compliant_suggestions = filter_suggestions_for_brand_compliance(raw_suggestions)

  # Enhance suggestions with brand-specific recommendations
  enhanced_suggestions = enhance_suggestions_with_brand_insights(compliant_suggestions)

  # Store integration results
  store_integration_insights('brand_aware_suggestions', {
    total_suggestions: raw_suggestions.length,
    compliant_suggestions: compliant_suggestions.length,
    enhanced_suggestions: enhanced_suggestions.length,
    suggestions: enhanced_suggestions
  })

  {
    success: true,
    suggestions: enhanced_suggestions,
    brand_integration: {
      brand_filtered: raw_suggestions.length - compliant_suggestions.length,
      brand_enhanced: enhanced_suggestions.length - compliant_suggestions.length,
      compliance_applied: true
    }
  }
rescue => e
  handle_integration_error(e, 'suggestion_generation')
end
-
-
# Validate journey content against brand guidelines
-
# Validate journey content against brand guidelines, step by step.
# Scope is controlled via options (:step_ids / :stage); results are
# aggregated, turned into recommendations, persisted and returned.
def orchestrate_content_validation(options = {})
  return no_brand_validation_result unless journey.brand.present?

  validation_results = []
  steps_to_validate = determine_validation_scope(options)

  steps_to_validate.each do |step|
    compliance_service = Journey::BrandComplianceService.new(
      journey: journey,
      step: step,
      content: step.description || step.name, # fall back to name when blank
      context: build_step_context(step)
    )

    step_result = compliance_service.check_compliance(options[:compliance_options] || {})
    step_result[:step_id] = step.id
    step_result[:step_name] = step.name

    validation_results << step_result
  end

  # Calculate overall journey compliance
  overall_compliance = calculate_overall_journey_compliance(validation_results)

  # Generate actionable recommendations
  recommendations = generate_journey_compliance_recommendations(validation_results, overall_compliance)

  # Store validation insights
  store_integration_insights('content_validation', {
    overall_compliance: overall_compliance,
    step_results: validation_results,
    recommendations: recommendations,
    validated_steps: steps_to_validate.length
  })

  {
    success: true,
    overall_compliance: overall_compliance,
    step_results: validation_results,
    recommendations: recommendations,
    validation_summary: build_validation_summary(validation_results)
  }
rescue => e
  handle_integration_error(e, 'content_validation')
end
-
-
# Auto-enhance journey content for better brand compliance
-
# Auto-enhance journey content for better brand compliance.
# Requires config.auto_fix_enabled; steps scoring below the configured
# threshold get an auto-fix attempt and are UPDATED IN PLACE
# (step.update!) when fixed content is produced.
def orchestrate_compliance_enhancement(options = {})
  return no_brand_enhancement_result unless journey.brand.present? && config.auto_fix_enabled

  enhancement_results = []
  steps_to_enhance = determine_enhancement_scope(options)

  steps_to_enhance.each do |step|
    compliance_service = Journey::BrandComplianceService.new(
      journey: journey,
      step: step,
      content: step.description || step.name,
      context: build_step_context(step)
    )

    # Check current compliance
    current_compliance = compliance_service.check_compliance

    if current_compliance[:score] < config.compliance_check_threshold
      # Attempt auto-fix
      fix_result = compliance_service.auto_fix_violations

      if fix_result[:fixed_content].present?
        # Update step with fixed content (persists to the database)
        step.update!(description: fix_result[:fixed_content])

        enhancement_results << {
          step_id: step.id,
          step_name: step.name,
          enhanced: true,
          original_score: current_compliance[:score],
          improved_score: compliance_service.quick_score,
          fixes_applied: fix_result[:fixes_applied] || []
        }
      else
        # Auto-fix produced nothing; report outstanding violations
        enhancement_results << {
          step_id: step.id,
          step_name: step.name,
          enhanced: false,
          original_score: current_compliance[:score],
          issues: current_compliance[:violations] || []
        }
      end
    else
      # Already above threshold; no fix attempted
      enhancement_results << {
        step_id: step.id,
        step_name: step.name,
        enhanced: false,
        original_score: current_compliance[:score],
        already_compliant: true
      }
    end
  end

  # Store enhancement insights
  store_integration_insights('compliance_enhancement', {
    enhancement_results: enhancement_results,
    steps_processed: steps_to_enhance.length,
    steps_enhanced: enhancement_results.count { |r| r[:enhanced] }
  })

  {
    success: true,
    enhancement_results: enhancement_results,
    summary: build_enhancement_summary(enhancement_results)
  }
rescue => e
  handle_integration_error(e, 'compliance_enhancement')
end
-
-
# Analyze brand performance across the journey
-
# Analyze brand performance across the journey over options[:period_days]
# (default 30): gathers compliance summaries/trends/alerts from the
# journey, derives insights and recommendations, persists and returns them.
def orchestrate_brand_performance_analysis(options = {})
  return no_brand_analysis_result unless journey.brand.present?

  analysis_period = options[:period_days] || 30

  # Gather brand compliance analytics
  compliance_summary = journey.brand_compliance_summary(analysis_period)
  compliance_by_step = journey.brand_compliance_by_step(analysis_period)
  violations_breakdown = journey.brand_violations_breakdown(analysis_period)

  # Analyze brand health trends
  brand_health = journey.overall_brand_health_score
  compliance_trend = journey.brand_compliance_trend(analysis_period)
  alerts = journey.brand_compliance_alerts

  # Generate insights and recommendations
  performance_insights = generate_brand_performance_insights(
    compliance_summary,
    compliance_by_step,
    violations_breakdown,
    brand_health,
    compliance_trend
  )

  recommendations = generate_brand_performance_recommendations(
    performance_insights,
    alerts
  )

  # Store performance analysis
  store_integration_insights('brand_performance_analysis', {
    analysis_period: analysis_period,
    compliance_summary: compliance_summary,
    brand_health_score: brand_health,
    compliance_trend: compliance_trend,
    insights: performance_insights,
    recommendations: recommendations,
    alerts: alerts
  })

  {
    success: true,
    brand_health_score: brand_health,
    compliance_trend: compliance_trend,
    compliance_summary: compliance_summary,
    compliance_by_step: compliance_by_step,
    violations_breakdown: violations_breakdown,
    insights: performance_insights,
    recommendations: recommendations,
    alerts: alerts
  }
rescue => e
  handle_integration_error(e, 'brand_performance_analysis')
end
-
-
# Sync journey content with updated brand guidelines
-
# Sync journey content with updated brand guidelines: re-validates every
# journey step (force-refreshed), compares each new score against the
# step's last stored check, flags steps below the configured threshold,
# and persists the sync report.
def orchestrate_brand_sync(options = {})
  return no_brand_sync_result unless journey.brand.present?

  sync_results = []
  updated_guidelines = options[:updated_guidelines] || []

  # If no specific guidelines provided, sync all active guidelines
  if updated_guidelines.empty?
    updated_guidelines = journey.brand.brand_guidelines.active.pluck(:id)
  end

  # Re-validate all journey steps against updated guidelines
  journey.journey_steps.each do |step|
    compliance_service = Journey::BrandComplianceService.new(
      journey: journey,
      step: step,
      content: step.description || step.name,
      context: build_step_context(step)
    )

    # Check compliance with updated guidelines (bypass any cached result)
    updated_compliance = compliance_service.check_compliance(
      compliance_level: :standard,
      force_refresh: true
    )

    # Compare with previous compliance if available (0.0 when none stored)
    previous_check = step.latest_compliance_check
    previous_score = previous_check&.data&.dig('score') || 0.0

    sync_results << {
      step_id: step.id,
      step_name: step.name,
      previous_score: previous_score,
      updated_score: updated_compliance[:score],
      score_change: updated_compliance[:score] - previous_score,
      new_violations: updated_compliance[:violations] || [],
      requires_attention: updated_compliance[:score] < config.compliance_check_threshold
    }
  end

  # Generate sync recommendations
  sync_recommendations = generate_sync_recommendations(sync_results)

  # Store sync insights
  store_integration_insights('brand_sync', {
    synced_guidelines: updated_guidelines,
    sync_results: sync_results,
    steps_requiring_attention: sync_results.count { |r| r[:requires_attention] },
    recommendations: sync_recommendations
  })

  {
    success: true,
    sync_results: sync_results,
    steps_requiring_attention: sync_results.count { |r| r[:requires_attention] },
    recommendations: sync_recommendations,
    summary: build_sync_summary(sync_results)
  }
rescue => e
  handle_integration_error(e, 'brand_sync')
end
-
-
# Get integration health status
-
# Get integration health status: aggregates four indicator checks (brand
# setup, journey compliance, integration performance, recent activity);
# healthy only when every indicator is healthy.
def integration_health_check
  return { healthy: false, reason: 'No brand associated' } unless journey.brand.present?

  health_indicators = {
    brand_setup: check_brand_setup_health,
    journey_compliance: check_journey_compliance_health,
    integration_performance: check_integration_performance_health,
    recent_activity: check_recent_activity_health
  }

  overall_health = health_indicators.values.all? { |indicator| indicator[:healthy] }

  {
    healthy: overall_health,
    indicators: health_indicators,
    recommendations: overall_health ? [] : generate_health_recommendations(health_indicators)
  }
end

private
-
-
# Keeps only suggestions whose 'brand_compliance_score' meets the
# configured threshold; suggestions without a score default to 0.5.
def filter_suggestions_for_brand_compliance(suggestions)
  return suggestions unless journey.brand.present?

  suggestions.select do |suggestion|
    # Filter based on brand compliance score
    compliance_score = suggestion['brand_compliance_score'] || 0.5
    compliance_score >= config.compliance_check_threshold
  end
end
-
-
# Returns copies of +suggestions+ annotated with 'brand_enhancements' and
# 'brand_compliance_tips'; originals are not mutated (shallow dup).
def enhance_suggestions_with_brand_insights(suggestions)
  return suggestions unless journey.brand.present?

  brand_context = extract_brand_enhancement_context

  suggestions.map do |suggestion|
    enhanced_suggestion = suggestion.dup

    # Add brand-specific enhancements
    enhanced_suggestion['brand_enhancements'] = generate_brand_enhancements(suggestion, brand_context)
    enhanced_suggestion['brand_compliance_tips'] = generate_compliance_tips(suggestion, brand_context)

    enhanced_suggestion
  end
end
-
-
# Snapshot of brand data the enhancement helpers work from: messaging
# framework, 5 most recently updated active guidelines, voice attributes,
# and industry.
def extract_brand_enhancement_context
  brand = journey.brand

  {
    messaging_framework: brand.messaging_framework,
    recent_guidelines: brand.brand_guidelines.active.order(updated_at: :desc).limit(5),
    voice_attributes: brand.brand_voice_attributes,
    industry_context: brand.industry
  }
end
-
-
# Builds enhancement entries for a suggestion: key-message matches from
# the messaging framework (high priority) plus voice-attribute guidance.
def generate_brand_enhancements(suggestion, brand_context)
  enhancements = []

  # Messaging framework enhancements
  if brand_context[:messaging_framework]&.key_messages.present?
    relevant_messages = find_relevant_key_messages(suggestion, brand_context[:messaging_framework])
    if relevant_messages.any?
      enhancements << {
        type: 'key_messaging',
        recommendation: "Consider incorporating: #{relevant_messages.join(', ')}",
        priority: 'high'
      }
    end
  end

  # Voice attribute enhancements
  if brand_context[:voice_attributes].present?
    voice_recommendations = generate_voice_recommendations(suggestion, brand_context[:voice_attributes])
    enhancements.concat(voice_recommendations)
  end

  enhancements
end
-
-
# Builds brand-compliance tips for a suggestion from its 'content_type'
# and 'channel'. +brand_context+ is accepted for signature parity with the
# sibling enhancement helpers but is not currently consulted.
def generate_compliance_tips(suggestion, brand_context)
  tips = []

  # Content type specific tips
  case suggestion['content_type']
  when 'email'
    tips.push("Ensure email signature includes brand elements",
              "Use approved email templates if available")
  when 'social_post'
    tips.push("Include brand hashtags where appropriate",
              "Follow social media brand voice guidelines")
  when 'blog_post'
    tips.push("Include brand storytelling elements",
              "Use brand-approved images and formatting")
  end

  # Channel specific tips
  if suggestion['channel'] == 'website'
    tips.push("Ensure consistent with website brand guidelines",
              "Use approved fonts and color schemes")
  end

  tips.uniq
end
-
-
# Finds framework key messages relevant to a suggestion by simple keyword
# matching (whole-message substring, or any single word of the message
# appearing in the suggestion text). Could be enhanced with NLP.
# Returns at most 3 unique messages.
def find_relevant_key_messages(suggestion, messaging_framework)
  haystack = "#{suggestion['name']} #{suggestion['description']}".downcase

  matches = messaging_framework.key_messages.flat_map do |_category, messages|
    messages.select do |message|
      needle = message.downcase
      haystack.include?(needle) || needle.split.any? { |word| haystack.include?(word) }
    end
  end

  matches.uniq.first(3) # Limit to 3 most relevant
end
-
-
# Turns brand voice attributes into guidance entries: one for 'tone' and
# one for 'formality' when present, both medium priority. +suggestion+ is
# accepted for signature parity but not currently consulted.
def generate_voice_recommendations(suggestion, voice_attributes)
  %w[tone formality].filter_map do |attribute|
    value = voice_attributes[attribute]
    next unless value

    if attribute == 'tone'
      {
        type: 'tone_guidance',
        recommendation: "Maintain #{value} tone throughout content",
        priority: 'medium'
      }
    else
      {
        type: 'formality_guidance',
        recommendation: "Use #{value} language style",
        priority: 'medium'
      }
    end
  end
end
-
-
# Resolves which journey steps to validate: explicit :step_ids wins, then
# :stage filter, then all steps.
def determine_validation_scope(options)
  if options[:step_ids].present?
    journey.journey_steps.where(id: options[:step_ids])
  elsif options[:stage].present?
    journey.journey_steps.where(stage: options[:stage])
  else
    journey.journey_steps
  end
end
-
-
# Resolves which journey steps to enhance: explicit :step_ids, or steps
# scoring below the configured threshold (:low_compliance_only), or all.
def determine_enhancement_scope(options)
  if options[:step_ids].present?
    journey.journey_steps.where(id: options[:step_ids])
  elsif options[:low_compliance_only]
    # Find steps with low compliance scores.
    # NOTE(review): this iterates every step and calls quick_compliance_score
    # per step — potentially expensive for large journeys.
    step_ids_needing_enhancement = []
    journey.journey_steps.each do |step|
      if step.quick_compliance_score < config.compliance_check_threshold
        step_ids_needing_enhancement << step.id
      end
    end
    journey.journey_steps.where(id: step_ids_needing_enhancement)
  else
    journey.journey_steps
  end
end
-
-
# Context hash handed to BrandComplianceService for a given step:
# step attributes plus journey campaign/audience metadata.
def build_step_context(step)
  {
    step_id: step.id,
    step_type: step.content_type,
    channel: step.channel,
    stage: step.stage,
    position: step.position,
    journey_context: {
      campaign_type: journey.campaign_type,
      target_audience: journey.target_audience
    }
  }
end
-
-
# Aggregates per-step compliance results into a journey-level summary:
# mean score (3 dp), all-steps-compliant flag, counts and compliance rate.
# An empty result set is treated as perfectly compliant.
def calculate_overall_journey_compliance(validation_results)
  return { score: 1.0, compliant: true } if validation_results.empty?

  total = validation_results.length
  average = validation_results.sum { |result| result[:score] || 0.0 } / total
  passing = validation_results.count { |result| result[:compliant] }

  {
    score: average.round(3),
    compliant: passing == total,
    compliant_steps: passing,
    total_steps: total,
    compliance_rate: (passing.to_f / total * 100).round(1)
  }
end
-
-
# Builds actionable recommendations from validation results: a high-priority
# journey-wide entry when the overall score is below 0.8, plus one entry per
# non-compliant step (high priority below a 0.5 score, medium otherwise).
def generate_journey_compliance_recommendations(validation_results, overall_compliance)
  recommendations = []

  # Journey-wide call-out when aggregate compliance is weak
  if overall_compliance[:score] < 0.8
    recommendations << {
      type: 'overall_improvement',
      priority: 'high',
      message: 'Journey has low brand compliance overall',
      action: 'Review and update content across multiple steps'
    }
  end

  # One targeted entry per failing step
  failing_steps = validation_results.reject { |result| result[:compliant] }
  failing_steps.each do |result|
    recommendations << {
      type: 'step_improvement',
      priority: result[:score] < 0.5 ? 'high' : 'medium',
      step_id: result[:step_id],
      step_name: result[:step_name],
      message: "Step has #{result[:violations]&.length || 0} brand violations",
      action: 'Review content against brand guidelines'
    }
  end

  recommendations
end
-
-
# Derives human-readable insights from the brand-performance datasets:
# trend direction, best/worst step performance, and the dominant violation
# category.
# NOTE(review): compliance_summary and brand_health are currently unused in
# this body; presumably kept for interface stability — confirm before removing.
def generate_brand_performance_insights(compliance_summary, compliance_by_step, violations_breakdown, brand_health, compliance_trend)
  insights = []

  # Compliance trend insight — only 'improving'/'declining' produce output.
  case compliance_trend
  when 'improving'
    insights << {
      type: 'positive_trend',
      message: 'Brand compliance is improving over time',
      impact: 'Brand consistency is strengthening'
    }
  when 'declining'
    insights << {
      type: 'negative_trend',
      message: 'Brand compliance is declining',
      impact: 'Brand consistency may be weakening'
    }
  end

  # Step performance insights: flag the worst step below 0.6 and surface the
  # best step above 0.9 as a template.
  if compliance_by_step.any?
    worst_performing_step = compliance_by_step.min_by { |_, data| data[:average_score] }
    best_performing_step = compliance_by_step.max_by { |_, data| data[:average_score] }

    if worst_performing_step[1][:average_score] < 0.6
      insights << {
        type: 'step_performance',
        message: "Step ID #{worst_performing_step[0]} has consistently low compliance",
        impact: 'May negatively affect brand perception'
      }
    end

    if best_performing_step[1][:average_score] > 0.9
      insights << {
        type: 'step_success',
        message: "Step ID #{best_performing_step[0]} maintains excellent brand compliance",
        impact: 'Can serve as a template for other steps'
      }
    end
  end

  # Violation pattern insights: highlight the most frequent violation category.
  if violations_breakdown[:by_category].any?
    most_common_violation = violations_breakdown[:by_category].max_by { |_, count| count }

    insights << {
      type: 'violation_pattern',
      message: "Most common violation type: #{most_common_violation[0]}",
      impact: 'Focus improvement efforts on this area'
    }
  end

  insights
end
-
-
# Merges alerts and trend/pattern insights into a deduplicated list of
# recommendations (deduped on [type, message]).
def generate_brand_performance_recommendations(insights, alerts)
  # Every alert maps 1:1 onto a recommendation.
  recommendations = alerts.map do |alert|
    {
      type: alert[:type],
      priority: alert[:severity],
      message: alert[:message],
      action: alert[:recommendation]
    }
  end

  # Insight-driven recommendations; only the two insight types below act.
  insights.each do |insight|
    case insight[:type]
    when 'negative_trend'
      recommendations << {
        type: 'trend_improvement',
        priority: 'high',
        message: 'Address declining compliance trend',
        action: 'Audit recent content changes and reinforce brand guidelines'
      }
    when 'violation_pattern'
      focus_area = insight[:message].split(': ').last
      recommendations << {
        type: 'pattern_fix',
        priority: 'medium',
        message: 'Address common violation pattern',
        action: "Focus on improving #{focus_area} compliance"
      }
    end
  end

  recommendations.uniq { |r| [r[:type], r[:message]] }
end
-
-
# Builds recommendations after a guideline sync, highlighting steps that
# became critical (attention + score < 0.5) or dropped sharply (> 0.2).
def generate_sync_recommendations(sync_results)
  recommendations = []

  critical = sync_results.select { |r| r[:requires_attention] && r[:updated_score] < 0.5 }
  unless critical.empty?
    recommendations << {
      type: 'critical_fixes',
      priority: 'high',
      message: "#{critical.length} steps require immediate attention",
      action: 'Review and fix critical brand violations',
      step_ids: critical.map { |s| s[:step_id] }
    }
  end

  declined = sync_results.select { |r| r[:score_change] < -0.2 }
  unless declined.empty?
    recommendations << {
      type: 'score_decline',
      priority: 'medium',
      message: "#{declined.length} steps show significant compliance decline",
      action: 'Investigate what changed in brand guidelines',
      step_ids: declined.map { |s| s[:step_id] }
    }
  end

  recommendations
end
-
-
# Persists an audit record of a brand-integration operation on the journey.
# Best-effort: persistence failures are logged and swallowed so integration
# flows are never interrupted by bookkeeping errors.
#
# @param operation_type [String, Symbol] which integration operation ran
# @param data [Hash] operation result payload, merged with audit metadata
def store_integration_insights(operation_type, data)
  journey.journey_insights.create!(
    insights_type: 'brand_integration',
    data: data.merge(
      operation_type: operation_type,
      integration_timestamp: Time.current,
      brand_id: journey.brand&.id  # brand may be absent; stored as nil
    ),
    calculated_at: Time.current,
    expires_at: 7.days.from_now,   # insight records auto-expire after a week
    metadata: {
      service: 'BrandIntegrationService',
      user_id: user&.id,
      context: integration_context
    }
  )
rescue => e
  # Deliberate swallow: insight storage must never break the caller.
  Rails.logger.error "Failed to store integration insights: #{e.message}"
end
-
-
# Rolls step validation results up into headline counts and averages.
# Returns {} when there is nothing to summarise.
def build_validation_summary(validation_results)
  return {} if validation_results.empty?

  total = validation_results.length
  score_total = validation_results.sum { |r| r[:score] || 0.0 }
  violation_total = validation_results.sum { |r| (r[:violations] || []).length }

  {
    total_steps: total,
    compliant_steps: validation_results.count { |r| r[:compliant] },
    average_score: (score_total / total).round(3),
    total_violations: violation_total
  }
end
-
-
# Summarises enhancement results: how many steps were enhanced, the rate,
# and the mean score improvement. Returns {} for an empty input.
def build_enhancement_summary(enhancement_results)
  return {} if enhancement_results.empty?

  total = enhancement_results.length
  enhanced = enhancement_results.count { |r| r[:enhanced] }

  {
    total_steps: total,
    enhanced_steps: enhanced,
    enhancement_rate: (enhanced.to_f / total * 100).round(1),
    average_improvement: calculate_average_improvement(enhancement_results)
  }
end
-
-
# Summarises a guideline sync: attention counts, mean score movement, and
# how many steps improved or declined. Returns {} for an empty input.
#
# Bug fix: the average score change previously divided by an Integer length
# using whatever numeric type the deltas carried; with all-Integer deltas
# Ruby performed integer division and silently truncated the mean. The
# deltas are now coerced to Float before averaging.
def build_sync_summary(sync_results)
  return {} if sync_results.empty?

  {
    total_steps: sync_results.length,
    steps_requiring_attention: sync_results.count { |r| r[:requires_attention] },
    average_score_change: (sync_results.sum { |r| r[:score_change].to_f } / sync_results.length).round(3),
    improved_steps: sync_results.count { |r| r[:score_change] > 0 },
    declined_steps: sync_results.count { |r| r[:score_change] < 0 }
  }
end
-
-
# Mean score delta across steps that were actually enhanced and carry both
# before/after scores; 0.0 when none qualify.
def calculate_average_improvement(enhancement_results)
  deltas = enhancement_results.filter_map do |r|
    next unless r[:enhanced] && r[:improved_score] && r[:original_score]

    r[:improved_score] - r[:original_score]
  end
  return 0.0 if deltas.empty?

  (deltas.sum / deltas.length).round(3)
end
-
-
# Verifies the journey's brand has the assets compliance checks depend on:
# a messaging framework, active guidelines, and voice attributes.
# @return [Hash] { healthy: Boolean, issues: Array<String> }
# NOTE(review): assumes journey.brand is present — callers must guard the
# no-brand case; confirm against call sites.
def check_brand_setup_health
  brand = journey.brand
  issues = []

  issues << "No messaging framework" unless brand.messaging_framework.present?
  issues << "No active brand guidelines" unless brand.brand_guidelines.active.any?
  issues << "No brand voice attributes" unless brand.brand_voice_attributes.present?

  { healthy: issues.empty?, issues: issues }
end
-
-
# Health check over the last 7 days of compliance data. Unhealthy when
# there is no data at all, or the average score falls below 0.7.
# @return [Hash] { healthy: Boolean, issues: Array<String> }
def check_journey_compliance_health
  compliance_summary = journey.brand_compliance_summary(7)

  if compliance_summary.empty?
    { healthy: false, issues: ["No recent compliance checks"] }
  elsif compliance_summary[:average_score] < 0.7
    { healthy: false, issues: ["Low average compliance score: #{compliance_summary[:average_score]}"] }
  else
    { healthy: true, issues: [] }
  end
end
-
-
# Healthy when at least one brand-integration insight was recorded for the
# journey in the last 24 hours.
# @return [Hash] { healthy: Boolean, issues: Array<String> }
def check_integration_performance_health
  recent_insights = journey.journey_insights
    .where(insights_type: 'brand_integration')
    .where('calculated_at >= ?', 24.hours.ago)

  if recent_insights.empty?
    { healthy: false, issues: ["No recent integration activity"] }
  else
    { healthy: true, issues: [] }
  end
end
-
-
# Checks whether steps updated in the last 24 hours kept an acceptable
# quick-compliance score (>= 0.7). No recent activity counts as healthy.
# @return [Hash] { healthy: Boolean, issues: Array<String> }
def check_recent_activity_health
  recent_updates = journey.journey_steps.where('updated_at >= ?', 24.hours.ago)

  if recent_updates.any?
    # Check if recent updates maintained compliance
    low_compliance_updates = recent_updates.select { |step| step.quick_compliance_score < 0.7 }

    if low_compliance_updates.any?
      { healthy: false, issues: ["Recent updates decreased compliance"] }
    else
      { healthy: true, issues: [] }
    end
  else
    { healthy: true, issues: [] }
  end
end
-
-
# Turns unhealthy health-indicator entries into prioritized recommendations,
# one per reported issue. Unknown indicator names produce nothing.
def generate_health_recommendations(health_indicators)
  recommendations = []

  health_indicators.each do |indicator_name, indicator_data|
    next if indicator_data[:healthy]

    indicator_data[:issues].each do |issue|
      rec =
        case indicator_name
        when :brand_setup
          {
            type: 'brand_setup',
            priority: 'high',
            message: issue,
            action: get_brand_setup_action(issue)
          }
        when :journey_compliance
          {
            type: 'compliance_improvement',
            priority: 'medium',
            message: issue,
            action: 'Review and improve journey content'
          }
        when :integration_performance
          {
            type: 'integration_activity',
            priority: 'low',
            message: issue,
            action: 'Run brand integration operations'
          }
        when :recent_activity
          {
            type: 'recent_compliance',
            priority: 'medium',
            message: issue,
            action: 'Review recent changes for brand compliance'
          }
        end
      recommendations << rec if rec
    end
  end

  recommendations
end
-
-
# Maps a brand-setup issue description onto a concrete remediation action;
# unrecognised issues get the generic fallback.
def get_brand_setup_action(issue)
  if issue.match?(/messaging framework/)
    'Set up brand messaging framework with key messages and tone'
  elsif issue.match?(/brand guidelines/)
    'Create active brand guidelines for content validation'
  elsif issue.match?(/voice attributes/)
    'Define brand voice attributes and tone guidelines'
  else
    'Complete brand setup'
  end
end
-
-
# Logs an integration failure and returns a uniform error payload for the
# caller to surface.
#
# Bug fix: Exception#backtrace is nil for exceptions that were instantiated
# but never raised, so `error.backtrace.join` could itself blow up with
# NoMethodError inside the error handler. Array() tolerates nil.
#
# @param error [Exception] the failure being handled
# @param operation [String, Symbol] which integration operation failed
# @return [Hash] { success: false, error:, error_type:, operation:, timestamp: }
def handle_integration_error(error, operation)
  Rails.logger.error "Brand integration error in #{operation}: #{error.message}"
  Rails.logger.error Array(error.backtrace).join("\n")

  {
    success: false,
    error: error.message,
    error_type: error.class.name,
    operation: operation,
    timestamp: Time.current
  }
end
-
-
# Neutral suggestions payload returned when the journey has no brand.
def no_brand_suggestions_result
  integration_info = {
    brand_filtered: 0,
    brand_enhanced: 0,
    compliance_applied: false,
    message: 'No brand associated with journey'
  }

  { success: true, suggestions: [], brand_integration: integration_info }
end
-
-
# Perfect-score validation payload used when there are no brand guidelines
# to validate against.
def no_brand_validation_result
  empty_sections = { step_results: [], recommendations: [], validation_summary: {} }

  {
    success: true,
    overall_compliance: { score: 1.0, compliant: true }
  }.merge(empty_sections).merge(message: 'No brand guidelines to validate against')
end
-
-
# Empty enhancement payload used when enhancement is skipped — either no
# brand guidelines exist or auto-fix is disabled.
def no_brand_enhancement_result
  empty_outcome = { enhancement_results: [], summary: {} }

  { success: true }.merge(empty_outcome).merge(
    message: 'No brand guidelines for enhancement or auto-fix disabled'
  )
end
-
-
# Neutral analysis payload for journeys without an associated brand: full
# health score, stable trend, no findings.
def no_brand_analysis_result
  empty_findings = { insights: [], recommendations: [], alerts: [] }

  {
    success: true,
    brand_health_score: 1.0,
    compliance_trend: 'stable'
  }.merge(empty_findings).merge(message: 'No brand associated for analysis')
end
-
-
# Empty sync payload used when there are no brand guidelines to sync.
def no_brand_sync_result
  base = { success: true, sync_results: [], recommendations: [], summary: {} }
  base.merge(message: 'No brand guidelines to sync')
end
-
end
-
end
-
# Compares performance, conversion-funnel and engagement metrics across a
# set of journeys (minimum two), and can benchmark a single journey against
# industry reference values.
class JourneyComparisonService
  # @param journey_ids [Integer, Array<Integer>] ids of journeys to compare
  def initialize(journey_ids)
    @journey_ids = Array(journey_ids)
    @journeys = Journey.where(id: @journey_ids).includes(:journey_analytics, :journey_metrics, :campaign, :persona)
  end

  # Full comparison report across all loaded journeys over the given window.
  # Returns { error: ... } when fewer than two journeys resolved.
  def compare_performance(period = 'daily', days = 30)
    return { error: 'Need at least 2 journeys to compare' } if @journeys.count < 2

    {
      comparison_overview: comparison_overview,
      performance_metrics: compare_performance_metrics(period, days),
      conversion_funnels: compare_conversion_funnels(days),
      engagement_analysis: compare_engagement_metrics(period, days),
      recommendations: generate_comparison_recommendations,
      statistical_analysis: statistical_significance_analysis,
      period_info: {
        period: period,
        days: days,
        start_date: days.days.ago,
        end_date: Time.current
      }
    }
  end

  # One identity/status summary row per journey.
  def comparison_overview
    @journeys.map do |journey|
      {
        id: journey.id,
        name: journey.name,
        status: journey.status,
        campaign: journey.campaign&.name,
        persona: journey.campaign&.persona&.name,
        total_steps: journey.total_steps,
        created_at: journey.created_at,
        performance_score: journey.latest_performance_score
      }
    end
  end

  # Aggregated analytics per journey over the window, keyed by journey id,
  # with relative rankings added (see add_performance_rankings).
  def compare_performance_metrics(period = 'daily', days = 30)
    start_date = days.days.ago
    end_date = Time.current

    metrics_comparison = {}

    @journeys.each do |journey|
      analytics = journey.journey_analytics
        .where(period_start: start_date..end_date)
        .where(aggregation_period: period)

      if analytics.any?
        metrics_comparison[journey.id] = {
          journey_name: journey.name,
          total_executions: analytics.sum(:total_executions),
          completed_executions: analytics.sum(:completed_executions),
          abandoned_executions: analytics.sum(:abandoned_executions),
          average_conversion_rate: analytics.average(:conversion_rate)&.round(2) || 0,
          average_engagement_score: analytics.average(:engagement_score)&.round(2) || 0,
          average_completion_time: analytics.average(:average_completion_time)&.round(2) || 0,
          completion_rate: calculate_completion_rate(analytics),
          abandonment_rate: calculate_abandonment_rate(analytics)
        }
      else
        # No analytics in the window: fall back to an all-zero row.
        metrics_comparison[journey.id] = default_metrics(journey)
      end
    end

    # Add relative performance rankings
    add_performance_rankings(metrics_comparison)
  end

  # Funnel overview, stage breakdown and bottlenecks per journey, plus a
  # cross-journey stage analysis under the :cross_journey_analysis key.
  def compare_conversion_funnels(days = 30)
    # NOTE(review): start_date/end_date are unused here — the window is
    # passed to funnel_performance via `days` instead; confirm before removal.
    start_date = days.days.ago
    end_date = Time.current

    funnel_comparison = {}

    @journeys.each do |journey|
      funnel_data = journey.funnel_performance('default', days)

      if funnel_data.any?
        funnel_comparison[journey.id] = {
          journey_name: journey.name,
          funnel_overview: funnel_data,
          stage_breakdown: analyze_funnel_stages(funnel_data),
          bottlenecks: identify_journey_bottlenecks(funnel_data)
        }
      else
        funnel_comparison[journey.id] = {
          journey_name: journey.name,
          funnel_overview: {},
          stage_breakdown: {},
          bottlenecks: []
        }
      end
    end

    # Compare funnel efficiency across journeys
    funnel_comparison[:cross_journey_analysis] = analyze_cross_journey_funnels(funnel_comparison)

    funnel_comparison
  end

  # Engagement metrics, overall score and 7-point trend per journey, plus a
  # :rankings entry ordering journeys by engagement score.
  def compare_engagement_metrics(period = 'daily', days = 30)
    engagement_comparison = {}

    @journeys.each do |journey|
      metrics = JourneyMetric.get_journey_dashboard_metrics(journey.id, period)

      # Keep only the metrics classified as engagement metrics.
      engagement_metrics = metrics.select { |metric_name, _|
        JourneyMetric::ENGAGEMENT_METRICS.include?(metric_name)
      }

      engagement_comparison[journey.id] = {
        journey_name: journey.name,
        engagement_metrics: engagement_metrics,
        engagement_score: calculate_overall_engagement_score(engagement_metrics),
        engagement_trends: JourneyMetric.get_metric_trend(journey.id, 'engagement_score', 7, period)
      }
    end

    # Rank journeys by engagement
    engagement_comparison[:rankings] = rank_by_engagement(engagement_comparison)

    engagement_comparison
  end

  # Approximate two-sample significance tests over recent analytics; only
  # defined for exactly two journeys, and {} when either lacks data.
  def statistical_significance_analysis
    return {} if @journeys.count != 2

    journey1, journey2 = @journeys

    # Get recent analytics for both journeys
    analytics1 = journey1.journey_analytics.recent.limit(10)
    analytics2 = journey2.journey_analytics.recent.limit(10)

    return {} if analytics1.empty? || analytics2.empty?

    {
      conversion_rate_significance: calculate_metric_significance(
        analytics1.pluck(:conversion_rate),
        analytics2.pluck(:conversion_rate),
        'conversion_rate'
      ),
      engagement_score_significance: calculate_metric_significance(
        analytics1.pluck(:engagement_score),
        analytics2.pluck(:engagement_score),
        'engagement_score'
      ),
      execution_volume_significance: calculate_metric_significance(
        analytics1.pluck(:total_executions),
        analytics2.pluck(:total_executions),
        'total_executions'
      ),
      overall_assessment: generate_significance_assessment(analytics1, analytics2)
    }
  end

  # Builds recommendations from performance gaps, low engagement (< 60) and
  # funnel bottlenecks across the compared journeys.
  def generate_comparison_recommendations
    return [] if @journeys.count < 2

    recommendations = []
    performance_metrics = compare_performance_metrics

    # Find best and worst performers
    best_performer = performance_metrics.max_by { |_, metrics| metrics[:average_conversion_rate] }
    worst_performer = performance_metrics.min_by { |_, metrics| metrics[:average_conversion_rate] }

    if best_performer && worst_performer && best_performer[0] != worst_performer[0]
      best_journey = @journeys.find(best_performer[0])
      worst_journey = @journeys.find(worst_performer[0])

      conversion_diff = best_performer[1][:average_conversion_rate] - worst_performer[1][:average_conversion_rate]

      # Only flag gaps wider than 2 percentage points.
      if conversion_diff > 2.0
        recommendations << {
          type: 'optimization_opportunity',
          priority: 'high',
          title: 'Significant Performance Gap Identified',
          description: "#{best_journey.name} outperforms #{worst_journey.name} by #{conversion_diff.round(1)}% conversion rate.",
          action_items: [
            "Analyze successful elements from #{best_journey.name}",
            "Consider A/B testing best practices from high-performer",
            "Review journey flow differences for optimization opportunities"
          ],
          best_performer: best_journey.name,
          worst_performer: worst_journey.name
        }
      end
    end

    # Engagement analysis recommendations
    engagement_comparison = compare_engagement_metrics
    low_engagement_journeys = engagement_comparison.select do |journey_id, data|
      next false if journey_id == :rankings # skip the rankings entry
      data[:engagement_score] < 60
    end

    if low_engagement_journeys.any?
      recommendations << {
        type: 'engagement_improvement',
        priority: 'medium',
        title: 'Low Engagement Detected',
        description: "#{low_engagement_journeys.count} journey(s) have engagement scores below 60%.",
        action_items: [
          'Review content relevance and quality',
          'Analyze user interaction patterns',
          'Consider personalizing content based on persona'
        ],
        affected_journeys: low_engagement_journeys.map { |_, data| data[:journey_name] }
      }
    end

    # Funnel analysis recommendations
    funnel_comparison = compare_conversion_funnels
    journeys_with_bottlenecks = funnel_comparison.select do |journey_id, data|
      next false if journey_id == :cross_journey_analysis # skip the summary entry
      data[:bottlenecks].any?
    end

    if journeys_with_bottlenecks.any?
      recommendations << {
        type: 'funnel_optimization',
        priority: 'high',
        title: 'Conversion Bottlenecks Identified',
        description: "Multiple journeys have identified conversion bottlenecks that may be limiting performance.",
        action_items: [
          'Focus on optimizing identified bottleneck stages',
          'Consider alternative approaches for problematic steps',
          'Implement progressive disclosure for complex steps'
        ],
        bottleneck_details: journeys_with_bottlenecks.map do |journey_id, data|
          {
            journey: data[:journey_name],
            bottlenecks: data[:bottlenecks]
          }
        end
      }
    end

    recommendations
  end

  # Rates a journey's 30-day metrics against benchmark values; defaults are
  # used when industry_metrics is empty. Returns {} without journey metrics.
  def self.benchmark_against_industry(journey, industry_metrics = {})
    # This would compare journey metrics against industry benchmarks
    # For now, use default benchmarks
    default_benchmarks = {
      conversion_rate: 5.0,
      engagement_score: 70.0,
      completion_rate: 65.0,
      abandonment_rate: 35.0
    }

    benchmarks = industry_metrics.empty? ? default_benchmarks : industry_metrics
    journey_metrics = journey.analytics_summary(30)

    return {} if journey_metrics.empty?

    comparison = {}

    benchmarks.each do |metric, benchmark_value|
      journey_value = case metric
                      when :conversion_rate
                        journey_metrics[:average_conversion_rate]
                      when :completion_rate
                        # [.., 1].max guards against dividing by zero executions.
                        journey_metrics[:completed_executions].to_f /
                          [journey_metrics[:total_executions], 1].max * 100
                      when :abandonment_rate
                        journey_metrics[:abandoned_executions].to_f /
                          [journey_metrics[:total_executions], 1].max * 100
                      else
                        journey_metrics[metric] || 0
                      end

      # Rating bands: >=120% excellent, >=100% above_average, >=80% average.
      performance_rating = if journey_value >= benchmark_value * 1.2
                             'excellent'
                           elsif journey_value >= benchmark_value
                             'above_average'
                           elsif journey_value >= benchmark_value * 0.8
                             'average'
                           else
                             'below_average'
                           end

      comparison[metric] = {
        journey_value: journey_value.round(2),
        benchmark_value: benchmark_value,
        difference: (journey_value - benchmark_value).round(2),
        performance_rating: performance_rating
      }
    end

    comparison
  end

  private

  # Completed / total executions as a percentage (0 when no executions).
  def calculate_completion_rate(analytics)
    total_executions = analytics.sum(:total_executions)
    completed_executions = analytics.sum(:completed_executions)

    return 0 if total_executions == 0
    (completed_executions.to_f / total_executions * 100).round(2)
  end

  # Abandoned / total executions as a percentage (0 when no executions).
  def calculate_abandonment_rate(analytics)
    total_executions = analytics.sum(:total_executions)
    abandoned_executions = analytics.sum(:abandoned_executions)

    return 0 if total_executions == 0
    (abandoned_executions.to_f / total_executions * 100).round(2)
  end

  # All-zero metrics row for journeys without analytics in the window.
  def default_metrics(journey)
    {
      journey_name: journey.name,
      total_executions: 0,
      completed_executions: 0,
      abandoned_executions: 0,
      average_conversion_rate: 0,
      average_engagement_score: 0,
      average_completion_time: 0,
      completion_rate: 0,
      abandonment_rate: 0
    }
  end

  # Mutates metrics_comparison in place, adding per-metric ranks and a
  # weighted overall performance score/rank. Returns the same hash.
  def add_performance_rankings(metrics_comparison)
    # Rank journeys by conversion rate
    sorted_by_conversion = metrics_comparison.sort_by { |_, metrics| -metrics[:average_conversion_rate] }

    sorted_by_conversion.each_with_index do |(journey_id, metrics), index|
      metrics[:conversion_rate_rank] = index + 1
    end

    # Rank by engagement score
    sorted_by_engagement = metrics_comparison.sort_by { |_, metrics| -metrics[:average_engagement_score] }

    sorted_by_engagement.each_with_index do |(journey_id, metrics), index|
      metrics[:engagement_score_rank] = index + 1
    end

    # Calculate overall performance rank
    # (weights: conversion 40%, engagement 30%, completion 30%)
    metrics_comparison.each do |journey_id, metrics|
      overall_score = (metrics[:average_conversion_rate] * 0.4 +
                       metrics[:average_engagement_score] * 0.3 +
                       metrics[:completion_rate] * 0.3)
      metrics[:overall_performance_score] = overall_score.round(2)
    end

    sorted_by_overall = metrics_comparison.sort_by { |_, metrics| -metrics[:overall_performance_score] }
    sorted_by_overall.each_with_index do |(journey_id, metrics), index|
      metrics[:overall_rank] = index + 1
    end

    metrics_comparison
  end

  # Per-stage conversion/drop-off/efficiency figures from funnel data.
  def analyze_funnel_stages(funnel_data)
    return {} unless funnel_data[:stages]

    stages = funnel_data[:stages]
    stage_analysis = {}

    stages.each_with_index do |stage, index|
      next_stage = stages[index + 1]

      stage_analysis[stage[:stage]] = {
        conversion_rate: stage[:conversion_rate],
        drop_off_rate: stage[:drop_off_rate],
        visitors: stage[:visitors],
        conversions: stage[:conversions],
        # Efficiency: next stage's visitors as a % of this stage's
        # conversions; the final stage is defined as 100.
        efficiency: next_stage ?
          (next_stage[:visitors].to_f / stage[:conversions] * 100).round(1) : 100
      }
    end

    stage_analysis
  end

  # Stages whose drop-off exceeds 50% ('high' severity above 70%).
  def identify_journey_bottlenecks(funnel_data)
    return [] unless funnel_data[:stages]

    stages = funnel_data[:stages]
    bottlenecks = []

    stages.each do |stage|
      if stage[:drop_off_rate] > 50
        bottlenecks << {
          stage: stage[:stage],
          drop_off_rate: stage[:drop_off_rate],
          severity: stage[:drop_off_rate] > 70 ? 'high' : 'medium'
        }
      end
    end

    bottlenecks
  end

  # For each journey stage, finds the best/worst performing journey and the
  # conversion-rate spread between them.
  def analyze_cross_journey_funnels(funnel_comparison)
    return {} if funnel_comparison.empty?

    stage_performance = {}

    Journey::STAGES.each do |stage|
      stage_data = []

      funnel_comparison.each do |journey_id, data|
        next if journey_id == :cross_journey_analysis

        stage_breakdown = data[:stage_breakdown][stage]
        if stage_breakdown
          stage_data << {
            journey_id: journey_id,
            journey_name: data[:journey_name],
            conversion_rate: stage_breakdown[:conversion_rate],
            drop_off_rate: stage_breakdown[:drop_off_rate]
          }
        end
      end

      next if stage_data.empty?

      best_performer = stage_data.max_by { |d| d[:conversion_rate] }
      worst_performer = stage_data.min_by { |d| d[:conversion_rate] }

      stage_performance[stage] = {
        average_conversion_rate: (stage_data.sum { |d| d[:conversion_rate] } / stage_data.count).round(2),
        best_performer: best_performer,
        worst_performer: worst_performer,
        performance_spread: (best_performer[:conversion_rate] - worst_performer[:conversion_rate]).round(2)
      }
    end

    stage_performance
  end

  # Mean of the individual metric :value entries (0 when there are none).
  def calculate_overall_engagement_score(engagement_metrics)
    return 0 if engagement_metrics.empty?

    scores = engagement_metrics.values.map { |metric| metric[:value] || 0 }
    (scores.sum / scores.count).round(2)
  end

  # 1-based ranking hash of journeys by engagement score, descending.
  def rank_by_engagement(engagement_comparison)
    engagement_scores = engagement_comparison.reject { |k, _| k == :rankings }
      .map { |journey_id, data| [journey_id, data[:engagement_score]] }
      .sort_by { |_, score| -score }

    rankings = {}
    engagement_scores.each_with_index do |(journey_id, score), index|
      journey_name = engagement_comparison[journey_id][:journey_name]
      rankings[index + 1] = {
        journey_id: journey_id,
        journey_name: journey_name,
        engagement_score: score
      }
    end

    rankings
  end

  # Two-sample t-statistic approximation with fixed significance cutoffs
  # (2.58 / 1.96 / 1.64 — the ~99% / 95% / 90% two-sided z thresholds).
  # Returns {} for empty samples or when both samples are constant.
  def calculate_metric_significance(values1, values2, metric_name)
    return {} if values1.empty? || values2.empty?

    mean1 = values1.sum.to_f / values1.count
    mean2 = values2.sum.to_f / values2.count

    # Simple t-test approximation
    variance1 = values1.sum { |x| (x - mean1) ** 2 } / [values1.count - 1, 1].max
    variance2 = values2.sum { |x| (x - mean2) ** 2 } / [values2.count - 1, 1].max

    pooled_se = Math.sqrt(variance1 / values1.count + variance2 / values2.count)

    # Zero spread: significance undefined.
    return {} if pooled_se == 0

    t_stat = (mean1 - mean2).abs / pooled_se

    # Simplified significance determination
    significance_level = if t_stat > 2.58
                           'highly_significant'
                         elsif t_stat > 1.96
                           'significant'
                         elsif t_stat > 1.64
                           'marginally_significant'
                         else
                           'not_significant'
                         end

    {
      metric_name: metric_name,
      mean1: mean1.round(2),
      mean2: mean2.round(2),
      difference: (mean1 - mean2).round(2),
      t_statistic: t_stat.round(3),
      significance_level: significance_level,
      sample_sizes: [values1.count, values2.count]
    }
  end

  # Plain-language verdict comparing the two journeys' mean conversion
  # rates; differences under 1 point are called "statistically similar".
  def generate_significance_assessment(analytics1, analytics2)
    journey1_name = @journeys.first.name
    journey2_name = @journeys.last.name

    mean_conversion1 = analytics1.average(:conversion_rate) || 0
    mean_conversion2 = analytics2.average(:conversion_rate) || 0

    if (mean_conversion1 - mean_conversion2).abs < 1.0
      "Performance between #{journey1_name} and #{journey2_name} is statistically similar"
    elsif mean_conversion1 > mean_conversion2
      "#{journey1_name} shows significantly better conversion performance than #{journey2_name}"
    else
      "#{journey2_name} shows significantly better conversion performance than #{journey1_name}"
    end
  end
end
-
# Drives a user's execution through a journey's step graph: starting at an
# entry step, advancing via prioritized conditional transitions (falling
# back to positional order), pausing/resuming, and dry-run simulation.
class JourneyFlowEngine
  attr_reader :execution, :journey, :user

  # @param execution [JourneyExecution] the execution being driven
  def initialize(execution)
    @execution = execution
    @journey = execution.journey
    @user = execution.user
  end

  # Convenience: find/create the execution for (journey, user) and start it.
  def self.start_journey(journey, user, context = {})
    execution = find_or_create_execution(journey, user)
    engine = new(execution)
    engine.start!(context)
  end

  # NOTE(review): find_or_create_by is not atomic; concurrent starts for the
  # same (journey, user) could race — confirm starts are serialized upstream.
  def self.find_or_create_execution(journey, user)
    JourneyExecution.find_or_create_by(journey: journey, user: user) do |exec|
      exec.execution_context = {}
    end
  end

  # Starts the execution at the journey's entry step and records the first
  # step execution. No-op (returns the execution) when already running or
  # completed. Raises when the journey has no entry step.
  def start!(initial_context = {})
    return execution if execution.running? || execution.completed?

    # Add initial context
    initial_context.each { |key, value| execution.add_context(key, value) }

    # Find entry point
    entry_step = find_entry_step
    unless entry_step
      execution.fail!
      raise "No entry step found for journey #{journey.name}"
    end

    execution.update!(current_step: entry_step)
    execution.start!

    # Create first step execution
    # NOTE(review): the assigned local is unused; presumably kept for
    # readability — confirm before removing.
    step_execution = execution.step_executions.create!(
      journey_step: entry_step,
      started_at: Time.current,
      context: execution.execution_context.dup
    )

    execution
  end

  # Advances to the next step. Returns true when a transition happened,
  # false when advancing was impossible or the journey completed.
  def advance!
    # Check if we can advance (running state and not at exit point)
    return false unless execution.running?
    return false if execution.current_step&.is_exit_point?

    current_step_execution = execution.step_executions
      .where(journey_step: execution.current_step)
      .last

    # Complete current step if not already completed
    if current_step_execution&.pending?
      current_step_execution.complete!
    end

    # Find next step based on conditions
    next_step = evaluate_next_step

    if next_step
      execution.update!(current_step: next_step)

      # Create new step execution
      execution.step_executions.create!(
        journey_step: next_step,
        started_at: Time.current,
        context: execution.execution_context.dup
      )

      # Check if this is an exit point
      if next_step.is_exit_point?
        execution.complete!
      end

      true
    else
      # No more steps - complete the journey
      execution.complete!
      false
    end
  end

  # State-machine guards (may_*?) make these safe no-ops when not allowed.
  def pause!
    execution.pause! if execution.may_pause?
  end

  def resume!
    execution.resume! if execution.may_resume?
  end

  # Marks the execution failed, recording an optional reason in the context
  # before the transition.
  def fail!(reason = nil)
    execution.add_context('failure_reason', reason) if reason
    execution.fail! if execution.may_fail?
  end

  # Evaluates a step's conditions against the given context (defaults to
  # the execution's current context).
  def evaluate_conditions(step, context = nil)
    context ||= execution.execution_context
    step.evaluate_conditions(context)
  end

  # Possible next steps from the current one: the single highest-priority
  # matching conditional transition, else the next sequential step.
  def get_available_next_steps
    return [] unless execution.current_step

    current_step = execution.current_step
    available_steps = []

    # Check conditional transitions first (ordered by priority)
    current_step.transitions_from.includes(:to_step).order(:priority).each do |transition|
      if transition.evaluate(execution.execution_context)
        available_steps << {
          step: transition.to_step,
          transition_type: transition.transition_type,
          conditions_met: true
        }
        break # Return only the first (highest priority) matching transition
      end
    end

    # If no conditional transitions, check sequential next step
    if available_steps.empty?
      next_sequential = journey.journey_steps
        .where('position > ?', current_step.position)
        .order(:position)
        .first

      if next_sequential
        available_steps << {
          step: next_sequential,
          transition_type: 'sequential',
          conditions_met: true
        }
      end
    end

    available_steps
  end

  # Dry-run walk of the journey under a hypothetical context merged over the
  # execution's context. Does not mutate the execution. Capped at 50 steps
  # to guard against transition cycles.
  def simulate_journey(context = {})
    simulation_context = execution.execution_context.merge(context)
    current_step = execution.current_step || find_entry_step
    visited_steps = []
    max_steps = 50 # Prevent infinite loops

    while current_step && visited_steps.length < max_steps
      visited_steps << {
        step: current_step,
        stage: current_step.stage,
        conditions: current_step.conditions
      }

      # Find next step based on simulation context
      next_step = nil
      current_step.transitions_from.each do |transition|
        if transition.evaluate(simulation_context)
          next_step = transition.to_step
          break
        end
      end

      # Break if we hit an exit point
      break if current_step.is_exit_point?

      # If no conditional transition, try sequential
      next_step ||= journey.journey_steps
        .where('position > ?', current_step.position)
        .order(:position)
        .first

      current_step = next_step
    end

    visited_steps
  end

  private

  # Explicit entry point if one is defined, otherwise the first step by
  # position; nil when the journey has no steps at all.
  def find_entry_step
    # First try explicit entry points
    entry_step = journey.journey_steps.entry_points.first

    # Fall back to first step by position
    entry_step ||= journey.journey_steps.order(:position).first

    entry_step
  end

  # Highest-priority matching conditional transition's target, else the next
  # sequential step, else nil (journey end).
  def evaluate_next_step
    current_step = execution.current_step
    return nil unless current_step

    # Check conditional transitions first (ordered by priority)
    current_step.transitions_from.includes(:to_step).order(:priority).each do |transition|
      if transition.evaluate(execution.execution_context)
        return transition.to_step
      end
    end

    # Fall back to sequential next step
    journey.journey_steps
      .where('position > ?', current_step.position)
      .order(:position)
      .first
  end
end
-
2
class JourneySuggestionEngine
-
# AI providers configuration: per-provider endpoint URL, default model, and
# a lambda that builds the auth headers from an API key.
PROVIDERS = {
  openai: {
    api_url: 'https://api.openai.com/v1/chat/completions',
    model: 'gpt-4-turbo-preview',
    headers: ->(api_key) { { 'Authorization' => "Bearer #{api_key}", 'Content-Type' => 'application/json' } }
  },
  anthropic: {
    api_url: 'https://api.anthropic.com/v1/messages',
    model: 'claude-3-sonnet-20240229',
    headers: ->(api_key) { { 'x-api-key' => api_key, 'Content-Type' => 'application/json', 'anthropic-version' => '2023-06-01' } }
  }
}.freeze

# Feedback dimensions a user may rate a suggestion on (see record_feedback).
FEEDBACK_TYPES = %w[suggestion_quality relevance usefulness timing channel_fit].freeze
# How long generated suggestions stay cached before being regenerated.
CACHE_TTL = 1.hour

attr_reader :journey, :user, :current_step, :provider
-
-
2
# @param journey [Journey] the journey suggestions are generated for
# @param user [User] the requesting user
# @param current_step [JourneyStep, nil] optional step used as the anchor
# @param provider [Symbol, String] AI provider key; see PROVIDERS
def initialize(journey:, user:, current_step: nil, provider: :openai)
  @journey = journey
  @user = user
  @current_step = current_step
  @provider = provider.to_sym
  @http_client = build_http_client
end
-
-
# Main method to generate contextual suggestions for the next journey step
-
2
# Generates ranked next-step suggestions, cached per journey/filters key
# for CACHE_TTL.
# @param filters [Hash] optional filters forwarded to the AI fetch
# @return [Array] ranked suggestions
def generate_suggestions(filters = {})
  cache_key = build_cache_key(filters)

  Rails.cache.fetch(cache_key, expires_in: CACHE_TTL) do
    context = build_journey_context
    suggestions = fetch_ai_suggestions(context, filters)
    ranked_suggestions = rank_suggestions(suggestions, context)

    # Side effect inside the cache block: insights are persisted only on a
    # cache miss, i.e. at most once per TTL window.
    store_journey_insights(ranked_suggestions, context)

    ranked_suggestions
  end
end
-
-
# Generate suggestions for specific stage and context
-
2
def suggest_for_stage(stage, filters = {})
-
context = build_stage_context(stage)
-
suggestions = fetch_ai_suggestions(context, filters.merge(stage: stage))
-
rank_suggestions(suggestions, context)
-
end
-
-
# Record user feedback on suggestions for learning
-
2
def record_feedback(suggested_step_data, feedback_type, rating: nil, selected: false, context: nil)
-
return unless FEEDBACK_TYPES.include?(feedback_type)
-
-
SuggestionFeedback.create!(
-
journey: journey,
-
journey_step: current_step,
-
suggested_step_id: suggested_step_data[:id],
-
user: user,
-
feedback_type: feedback_type,
-
rating: rating,
-
selected: selected,
-
context: context,
-
metadata: {
-
suggested_step_data: suggested_step_data,
-
timestamp: Time.current,
-
provider: provider
-
}
-
)
-
end
-
-
  # Average rating per feedback_type for this journey's suggestion
  # feedback, e.g. { "relevance" => 4.2 }.
  #
  # NOTE(review): the keys produced here are feedback_type values, but
  # calculate_feedback_adjustment looks up "<content_type>_rating" /
  # "<stage>_rating" keys — those lookups can never hit this hash, so the
  # feedback adjustment always falls back to its 3.0 defaults. Confirm
  # the intended key scheme with the learning-algorithm owner.
  def get_feedback_insights
    journey.suggestion_feedbacks
      .joins(:journey_step)
      .group(:feedback_type)
      .average(:rating)
  end
-
-
2
private
-
-
2
def build_http_client
-
Faraday.new do |faraday|
-
faraday.request :json
-
faraday.response :json, content_type: /\bjson$/
-
faraday.adapter Faraday.default_adapter
-
faraday.request :retry, max: 3, interval: 0.5
-
end
-
end
-
-
2
def build_journey_context
-
base_context = {
-
journey: {
-
name: journey.name,
-
description: journey.description,
-
campaign_type: journey.campaign_type,
-
target_audience: journey.target_audience,
-
goals: journey.goals,
-
current_status: journey.status,
-
total_steps: journey.total_steps,
-
stages_coverage: journey.steps_by_stage
-
},
-
current_step: current_step&.as_json(
-
only: [:name, :description, :stage, :content_type, :channel, :duration_days],
-
include: { next_steps: { only: [:name, :stage, :content_type] } }
-
),
-
existing_steps: journey.journey_steps.by_position.map do |step|
-
{
-
name: step.name,
-
stage: step.stage,
-
content_type: step.content_type,
-
channel: step.channel,
-
position: step.position
-
}
-
end,
-
user_preferences: extract_user_preferences,
-
historical_performance: get_historical_performance,
-
industry_best_practices: get_best_practices_for_campaign_type
-
}
-
-
# Add brand context if journey has an associated brand
-
if journey.brand_id.present?
-
base_context[:brand] = extract_brand_context
-
end
-
-
base_context
-
end
-
-
2
def build_stage_context(stage)
-
build_journey_context.merge(
-
target_stage: stage,
-
stage_gaps: identify_stage_gaps(stage),
-
stage_performance: get_stage_performance(stage)
-
)
-
end
-
-
2
  # Fetches raw suggestion hashes from the configured AI provider and,
  # when brand context is present, filters/annotates them against brand
  # guidelines.
  #
  # Degrades gracefully: any StandardError (network failure, JSON parse
  # error, or the ArgumentError raised below for an unknown provider) is
  # logged and replaced with template-based fallback suggestions.
  def fetch_ai_suggestions(context, filters)
    prompt = build_suggestion_prompt(context, filters)

    raw_suggestions = case provider
                      when :openai
                        fetch_openai_suggestions(prompt)
                      when :anthropic
                        fetch_anthropic_suggestions(prompt)
                      else
                        raise ArgumentError, "Unsupported provider: #{provider}"
                      end

    # Apply brand guideline filtering if brand context is available
    if context[:brand].present?
      filter_suggestions_by_brand_guidelines(raw_suggestions, context[:brand])
    else
      raw_suggestions
    end
  rescue => e
    Rails.logger.error "AI suggestion generation failed: #{e.message}"
    generate_fallback_suggestions(context, filters)
  end
-
-
2
def build_suggestion_prompt(context, filters)
-
base_prompt = <<~PROMPT
-
You are an expert marketing journey strategist. Based on the following journey context,
-
suggest 3-5 highly relevant next steps that would optimize the customer journey.
-
-
Journey Context:
-
#{context.to_json}
-
-
Filters Applied:
-
#{filters.to_json}
-
-
Please provide suggestions in the following JSON format:
-
{
-
"suggestions": [
-
{
-
"name": "Step name",
-
"description": "Detailed description",
-
"stage": "awareness|consideration|conversion|retention|advocacy",
-
"content_type": "email|blog_post|social_post|landing_page|video|webinar|etc",
-
"channel": "email|website|facebook|instagram|etc",
-
"duration_days": 1-30,
-
"reasoning": "Why this step would be effective",
-
"confidence_score": 0.0-1.0,
-
"expected_impact": "high|medium|low",
-
"priority": 1-5,
-
"best_practices": ["practice1", "practice2"],
-
"success_metrics": ["metric1", "metric2"],
-
"brand_compliance_score": 0.0-1.0
-
}
-
]
-
}
-
-
Focus on:
-
1. Logical progression from current step
-
2. Addressing gaps in the journey stages
-
3. Optimizing for the stated goals
-
4. Leveraging successful patterns from similar campaigns
-
5. Considering target audience preferences
-
PROMPT
-
-
# Add brand-specific guidelines if available
-
if context[:brand].present?
-
base_prompt += <<~BRAND_CONTEXT
-
-
BRAND COMPLIANCE REQUIREMENTS:
-
#{format_brand_guidelines_for_prompt(context[:brand])}
-
-
IMPORTANT: All suggestions must strictly adhere to brand guidelines.
-
Include a brand_compliance_score (0.0-1.0) for each suggestion indicating
-
how well it aligns with the brand voice, messaging, and visual guidelines.
-
BRAND_CONTEXT
-
end
-
-
if filters[:stage]
-
base_prompt += "\n\nSpecial focus: Generate suggestions specifically for the '#{filters[:stage]}' stage."
-
end
-
-
if filters[:content_type]
-
base_prompt += "\n\nContent preference: Prioritize '#{filters[:content_type]}' content types."
-
end
-
-
if filters[:channel]
-
base_prompt += "\n\nChannel preference: Focus on '#{filters[:channel]}' channel opportunities."
-
end
-
-
base_prompt
-
end
-
-
2
def fetch_openai_suggestions(prompt)
-
config = PROVIDERS[:openai]
-
api_key = Rails.application.credentials.openai_api_key
-
-
return generate_fallback_suggestions({}, {}) unless api_key
-
-
response = @http_client.post(config[:api_url]) do |req|
-
req.headers.merge!(config[:headers].call(api_key))
-
req.body = {
-
model: config[:model],
-
messages: [
-
{ role: 'system', content: 'You are a marketing journey optimization expert.' },
-
{ role: 'user', content: prompt }
-
],
-
temperature: 0.7,
-
max_tokens: 2000
-
}
-
end
-
-
if response.success?
-
content = response.body.dig('choices', 0, 'message', 'content')
-
JSON.parse(content)['suggestions']
-
else
-
Rails.logger.error "OpenAI API error: #{response.body}"
-
generate_fallback_suggestions({}, {})
-
end
-
end
-
-
2
def fetch_anthropic_suggestions(prompt)
-
config = PROVIDERS[:anthropic]
-
api_key = Rails.application.credentials.anthropic_api_key
-
-
return generate_fallback_suggestions({}, {}) unless api_key
-
-
response = @http_client.post(config[:api_url]) do |req|
-
req.headers.merge!(config[:headers].call(api_key))
-
req.body = {
-
model: config[:model],
-
max_tokens: 2000,
-
messages: [
-
{ role: 'user', content: prompt }
-
]
-
}
-
end
-
-
if response.success?
-
content = response.body.dig('content', 0, 'text')
-
JSON.parse(content)['suggestions']
-
else
-
Rails.logger.error "Anthropic API error: #{response.body}"
-
generate_fallback_suggestions({}, {})
-
end
-
end
-
-
2
  # Scores and sorts suggestions (highest first). Each suggestion's
  # 'calculated_score' is its base confidence plus four additive
  # adjustments (historical feedback, stage-coverage gaps, user
  # preferences, brand compliance), capped at 1.0 — note there is no
  # lower bound, so heavily penalized scores can go below 0.
  # The individual components are exposed under 'ranking_factors' for
  # debugging/analytics. Non-array input is returned unchanged.
  def rank_suggestions(suggestions, context)
    return suggestions unless suggestions.is_a?(Array)

    # Apply learning algorithm based on historical feedback
    feedback_insights = get_feedback_insights

    suggestions.map do |suggestion|
      base_score = suggestion['confidence_score'] || 0.5

      # Adjust score based on historical feedback
      feedback_adjustment = calculate_feedback_adjustment(suggestion, feedback_insights)

      # Adjust for journey completeness
      completeness_adjustment = calculate_completeness_adjustment(suggestion, context)

      # Adjust for user preferences
      preference_adjustment = calculate_preference_adjustment(suggestion, context)

      # Adjust for brand compliance if brand context is available
      brand_adjustment = context[:brand].present? ?
        calculate_brand_compliance_adjustment(suggestion, context[:brand]) : 0.0

      final_score = [
        base_score + feedback_adjustment + completeness_adjustment + preference_adjustment + brand_adjustment,
        1.0
      ].min

      suggestion.merge(
        'calculated_score' => final_score,
        'ranking_factors' => {
          'base_confidence' => base_score,
          'feedback_adjustment' => feedback_adjustment,
          'completeness_adjustment' => completeness_adjustment,
          'preference_adjustment' => preference_adjustment,
          'brand_compliance_adjustment' => brand_adjustment
        }
      )
    end.sort_by { |s| -s['calculated_score'] }
  end
-
-
2
def calculate_feedback_adjustment(suggestion, feedback_insights)
-
# Weight suggestions based on historical feedback for similar content types and stages
-
content_type_rating = feedback_insights["#{suggestion['content_type']}_rating"] || 3.0
-
stage_rating = feedback_insights["#{suggestion['stage']}_rating"] || 3.0
-
-
# Convert 1-5 rating to -0.2 to +0.2 adjustment
-
((content_type_rating + stage_rating) / 2 - 3.0) * 0.1
-
end
-
-
2
def calculate_completeness_adjustment(suggestion, context)
-
# Favor suggestions that fill gaps in the journey
-
existing_stages = context[:journey][:stages_coverage].keys
-
suggested_stage = suggestion['stage']
-
-
# Boost score if this stage is underrepresented
-
stage_count = context[:journey][:stages_coverage][suggested_stage] || 0
-
total_steps = context[:journey][:total_steps] || 1
-
-
if stage_count < (total_steps / 5.0) # If stage has less than 20% representation
-
0.15
-
elsif stage_count == 0 # If stage is completely missing
-
0.25
-
else
-
0.0
-
end
-
end
-
-
2
def calculate_preference_adjustment(suggestion, context)
-
# Adjust based on user's historical preferences and journey goals
-
user_prefs = context[:user_preferences]
-
-
adjustment = 0.0
-
-
# Favor preferred content types
-
if user_prefs[:preferred_content_types]&.include?(suggestion['content_type'])
-
adjustment += 0.1
-
end
-
-
# Favor preferred channels
-
if user_prefs[:preferred_channels]&.include?(suggestion['channel'])
-
adjustment += 0.1
-
end
-
-
adjustment
-
end
-
-
2
def generate_fallback_suggestions(context, filters)
-
# Fallback suggestions based on common patterns and templates
-
stage = filters[:stage] || detect_next_logical_stage
-
-
case stage
-
when 'awareness'
-
generate_awareness_suggestions
-
when 'consideration'
-
generate_consideration_suggestions
-
when 'conversion'
-
generate_conversion_suggestions
-
when 'retention'
-
generate_retention_suggestions
-
when 'advocacy'
-
generate_advocacy_suggestions
-
else
-
generate_general_suggestions
-
end
-
end
-
-
2
  # Picks the stage to target for fallback suggestions: the stage after
  # the current step's stage in the standard funnel, or 'awareness' when
  # there is no current step. Once on the final stage ('advocacy'), or if
  # the current stage is unrecognized, stays put / restarts from index 0.
  def detect_next_logical_stage
    return 'awareness' unless current_step

    stage_progression = %w[awareness consideration conversion retention advocacy]
    current_stage_index = stage_progression.index(current_step.stage) || 0

    # Move to next stage or stay in current if it's the last one
    stage_progression[current_stage_index + 1] || current_step.stage
  end
-
-
2
def generate_awareness_suggestions
-
[
-
{
-
'name' => 'Educational Blog Post',
-
'description' => 'Create valuable content that addresses target audience pain points',
-
'stage' => 'awareness',
-
'content_type' => 'blog_post',
-
'channel' => 'website',
-
'duration_days' => 7,
-
'reasoning' => 'Blog content drives organic traffic and establishes thought leadership',
-
'confidence_score' => 0.8,
-
'calculated_score' => 0.8
-
},
-
{
-
'name' => 'Social Media Campaign',
-
'description' => 'Engaging social content to increase brand visibility',
-
'stage' => 'awareness',
-
'content_type' => 'social_post',
-
'channel' => 'facebook',
-
'duration_days' => 3,
-
'reasoning' => 'Social media expands reach and engagement with target audience',
-
'confidence_score' => 0.75,
-
'calculated_score' => 0.75
-
}
-
]
-
end
-
-
2
def generate_consideration_suggestions
-
[
-
{
-
'name' => 'Product Demo Video',
-
'description' => 'Showcase product features and benefits through video demonstration',
-
'stage' => 'consideration',
-
'content_type' => 'video',
-
'channel' => 'website',
-
'duration_days' => 5,
-
'reasoning' => 'Video content helps prospects understand product value proposition',
-
'confidence_score' => 0.85,
-
'calculated_score' => 0.85
-
},
-
{
-
'name' => 'Comparison Guide',
-
'description' => 'Detailed comparison of solutions to help decision making',
-
'stage' => 'consideration',
-
'content_type' => 'ebook',
-
'channel' => 'email',
-
'duration_days' => 7,
-
'reasoning' => 'Comparison content addresses evaluation criteria concerns',
-
'confidence_score' => 0.8,
-
'calculated_score' => 0.8
-
}
-
]
-
end
-
-
2
def generate_conversion_suggestions
-
[
-
{
-
'name' => 'Limited Time Offer',
-
'description' => 'Time-sensitive promotion to encourage immediate action',
-
'stage' => 'conversion',
-
'content_type' => 'email',
-
'channel' => 'email',
-
'duration_days' => 3,
-
'reasoning' => 'Urgency and scarcity drive conversion behavior',
-
'confidence_score' => 0.9,
-
'calculated_score' => 0.9
-
},
-
{
-
'name' => 'Free Trial Landing Page',
-
'description' => 'Dedicated page optimized for trial sign-ups',
-
'stage' => 'conversion',
-
'content_type' => 'landing_page',
-
'channel' => 'website',
-
'duration_days' => 1,
-
'reasoning' => 'Reduces friction and focuses on conversion goal',
-
'confidence_score' => 0.85,
-
'calculated_score' => 0.85
-
}
-
]
-
end
-
-
2
def generate_retention_suggestions
-
[
-
{
-
'name' => 'Onboarding Email Series',
-
'description' => 'Multi-part email series to guide new customers',
-
'stage' => 'retention',
-
'content_type' => 'email',
-
'channel' => 'email',
-
'duration_days' => 14,
-
'reasoning' => 'Proper onboarding increases customer lifetime value',
-
'confidence_score' => 0.9,
-
'calculated_score' => 0.9
-
}
-
]
-
end
-
-
2
def generate_advocacy_suggestions
-
[
-
{
-
'name' => 'Customer Success Story',
-
'description' => 'Showcase customer achievements and testimonials',
-
'stage' => 'advocacy',
-
'content_type' => 'case_study',
-
'channel' => 'website',
-
'duration_days' => 7,
-
'reasoning' => 'Success stories build credibility and encourage referrals',
-
'confidence_score' => 0.85,
-
'calculated_score' => 0.85
-
}
-
]
-
end
-
-
2
def generate_general_suggestions
-
[
-
{
-
'name' => 'Welcome Email',
-
'description' => 'Introductory email to new subscribers or customers',
-
'stage' => 'awareness',
-
'content_type' => 'email',
-
'channel' => 'email',
-
'duration_days' => 1,
-
'reasoning' => 'Sets expectations and begins relationship building',
-
'confidence_score' => 0.7,
-
'calculated_score' => 0.7
-
}
-
]
-
end
-
-
2
def extract_user_preferences
-
# Analyze user's historical journey patterns
-
user_journeys = user.journeys.published
-
-
{
-
preferred_content_types: calculate_preferred_content_types(user_journeys),
-
preferred_channels: calculate_preferred_channels(user_journeys),
-
avg_journey_length: calculate_avg_journey_length(user_journeys),
-
successful_patterns: identify_successful_patterns(user_journeys)
-
}
-
end
-
-
2
def calculate_preferred_content_types(journeys)
-
journeys.joins(:journey_steps)
-
.group('journey_steps.content_type')
-
.count
-
.sort_by { |_, count| -count }
-
.first(3)
-
.map(&:first)
-
.compact
-
end
-
-
2
def calculate_preferred_channels(journeys)
-
journeys.joins(:journey_steps)
-
.group('journey_steps.channel')
-
.count
-
.sort_by { |_, count| -count }
-
.first(3)
-
.map(&:first)
-
.compact
-
end
-
-
2
def calculate_avg_journey_length(journeys)
-
return 0 if journeys.empty?
-
-
journeys.joins(:journey_steps).group(:id).count.values.sum.to_f / journeys.count
-
end
-
-
2
def identify_successful_patterns(journeys)
-
# This would analyze successful journeys based on execution data
-
# For now, return empty hash - to be implemented with analytics
-
{}
-
end
-
-
2
def get_historical_performance
-
# Analyze performance of similar journey steps
-
# This would integrate with analytics data
-
{}
-
end
-
-
2
def get_best_practices_for_campaign_type
-
# Return best practices based on campaign type from templates
-
return {} unless journey.campaign_type
-
-
template = JourneyTemplate.where(campaign_type: journey.campaign_type)
-
.order(usage_count: :desc)
-
.first
-
-
template&.best_practices || {}
-
end
-
-
2
  # Returns the funnel stages (from Journey::STAGES) that currently have
  # no steps in this journey.
  #
  # NOTE(review): the target_stage parameter is accepted but never used —
  # confirm whether gaps should be computed relative to it.
  def identify_stage_gaps(target_stage)
    existing_stages = journey.journey_steps.pluck(:stage).uniq
    all_stages = Journey::STAGES

    all_stages - existing_stages
  end
-
-
2
def get_stage_performance(stage)
-
# Analyze performance of steps in this stage
-
# This would integrate with analytics data
-
{}
-
end
-
-
2
def store_journey_insights(suggestions, context)
-
JourneyInsight.create!(
-
journey: journey,
-
insights_type: 'ai_suggestions',
-
data: {
-
suggestions: suggestions,
-
context_summary: {
-
total_steps: context[:journey][:total_steps],
-
stages_coverage: context[:journey][:stages_coverage],
-
provider: provider
-
},
-
generated_at: Time.current
-
},
-
calculated_at: Time.current,
-
expires_at: 24.hours.from_now,
-
metadata: {
-
provider: provider,
-
user_id: user.id,
-
current_step_id: current_step&.id
-
}
-
)
-
end
-
-
2
def build_cache_key(filters)
-
key_parts = [
-
"journey_suggestions",
-
journey.id,
-
journey.updated_at.to_i,
-
current_step&.id,
-
user.id,
-
provider,
-
Digest::MD5.hexdigest(filters.to_json)
-
]
-
-
# Include brand context in cache key if available
-
if journey.brand_id.present?
-
key_parts << journey.brand_id
-
key_parts << journey.brand.updated_at.to_i
-
end
-
-
key_parts.join(":")
-
end
-
-
# Brand-related helper methods
-
2
def extract_brand_context
-
brand = journey.brand
-
return {} unless brand
-
-
{
-
id: brand.id,
-
name: brand.name,
-
industry: brand.industry,
-
brand_voice: extract_brand_voice(brand),
-
messaging_framework: extract_messaging_framework(brand),
-
guidelines: extract_brand_guidelines(brand),
-
color_scheme: brand.color_scheme || {},
-
typography: brand.typography || {},
-
visual_identity: extract_visual_identity(brand)
-
}
-
end
-
-
2
def extract_brand_voice(brand)
-
voice_data = brand.brand_voice_attributes || {}
-
latest_analysis = brand.latest_analysis
-
-
if latest_analysis&.voice_attributes.present?
-
voice_data.merge(latest_analysis.voice_attributes)
-
else
-
voice_data
-
end
-
end
-
-
2
def extract_messaging_framework(brand)
-
framework = brand.messaging_framework
-
return {} unless framework
-
-
{
-
key_messages: framework.key_messages || {},
-
value_propositions: framework.value_propositions || {},
-
approved_phrases: framework.approved_phrases || [],
-
banned_words: framework.banned_words || [],
-
tone_attributes: framework.tone_attributes || {}
-
}
-
end
-
-
2
def extract_brand_guidelines(brand)
-
guidelines = brand.brand_guidelines.active.order(priority: :desc).limit(10)
-
-
guidelines.map do |guideline|
-
{
-
category: guideline.category,
-
rule_type: guideline.rule_type,
-
rule_text: guideline.rule_text,
-
priority: guideline.priority,
-
compliance_level: guideline.compliance_level
-
}
-
end
-
end
-
-
2
def extract_visual_identity(brand)
-
{
-
primary_colors: brand.primary_colors,
-
secondary_colors: brand.secondary_colors,
-
font_families: brand.font_families,
-
has_brand_assets: brand.has_complete_brand_assets?
-
}
-
end
-
-
2
def format_brand_guidelines_for_prompt(brand_context)
-
guidelines_text = []
-
-
# Brand voice and tone
-
if brand_context[:brand_voice].present?
-
guidelines_text << "Brand Voice: #{brand_context[:brand_voice].to_json}"
-
end
-
-
# Messaging framework
-
framework = brand_context[:messaging_framework]
-
if framework.present?
-
guidelines_text << "Key Messages: #{framework[:key_messages].to_json}" if framework[:key_messages].present?
-
guidelines_text << "Value Propositions: #{framework[:value_propositions].to_json}" if framework[:value_propositions].present?
-
guidelines_text << "Approved Phrases: #{framework[:approved_phrases].join(', ')}" if framework[:approved_phrases].any?
-
guidelines_text << "Banned Words: #{framework[:banned_words].join(', ')}" if framework[:banned_words].any?
-
guidelines_text << "Tone Requirements: #{framework[:tone_attributes].to_json}" if framework[:tone_attributes].present?
-
end
-
-
# Brand guidelines
-
if brand_context[:guidelines].any?
-
guidelines_text << "Brand Guidelines:"
-
brand_context[:guidelines].each do |guideline|
-
guidelines_text << "- #{guideline[:category]} (#{guideline[:rule_type]}): #{guideline[:rule_text]}"
-
end
-
end
-
-
# Visual identity
-
visual = brand_context[:visual_identity]
-
if visual.present?
-
guidelines_text << "Primary Colors: #{visual[:primary_colors].join(', ')}" if visual[:primary_colors].any?
-
guidelines_text << "Typography: #{visual[:font_families].keys.join(', ')}" if visual[:font_families].any?
-
end
-
-
guidelines_text.join("\n")
-
end
-
-
2
def filter_suggestions_by_brand_guidelines(suggestions, brand_context)
-
return suggestions unless suggestions.is_a?(Array)
-
-
framework = brand_context[:messaging_framework] || {}
-
banned_words = framework[:banned_words] || []
-
-
# Filter out suggestions that contain banned words
-
filtered_suggestions = suggestions.reject do |suggestion|
-
text_content = "#{suggestion['name']} #{suggestion['description']}".downcase
-
banned_words.any? { |word| text_content.include?(word.downcase) }
-
end
-
-
# Add compliance warnings for potentially problematic suggestions
-
filtered_suggestions.map do |suggestion|
-
warnings = []
-
-
# Check for tone compliance
-
if framework[:tone_attributes].present?
-
tone_warnings = check_tone_compliance(suggestion, framework[:tone_attributes])
-
warnings.concat(tone_warnings)
-
end
-
-
suggestion['compliance_warnings'] = warnings if warnings.any?
-
suggestion
-
end
-
end
-
-
2
def check_tone_compliance(suggestion, tone_attributes)
-
warnings = []
-
content = "#{suggestion['name']} #{suggestion['description']}".downcase
-
-
# Check formality level
-
if tone_attributes['formality'] == 'formal'
-
informal_words = ['hey', 'yeah', 'cool', 'awesome', 'gonna', 'wanna']
-
found_informal = informal_words.select { |word| content.include?(word) }
-
if found_informal.any?
-
warnings << "Contains informal language: #{found_informal.join(', ')}"
-
end
-
elsif tone_attributes['formality'] == 'casual'
-
formal_words = ['utilize', 'facilitate', 'endeavor', 'subsequently']
-
found_formal = formal_words.select { |word| content.include?(word) }
-
if found_formal.any?
-
warnings << "Contains overly formal language: #{found_formal.join(', ')}"
-
end
-
end
-
-
warnings
-
end
-
-
2
def calculate_brand_compliance_adjustment(suggestion, brand_context)
-
return 0.0 unless brand_context.present?
-
-
base_compliance_score = suggestion['brand_compliance_score'] || 0.5
-
-
# Higher weight for brand compliance in scoring
-
compliance_weight = 0.3
-
-
# Convert compliance score to adjustment (-0.15 to +0.15)
-
adjustment = (base_compliance_score - 0.5) * compliance_weight
-
-
# Additional penalty for compliance warnings
-
if suggestion['compliance_warnings']&.any?
-
adjustment -= 0.1
-
end
-
-
adjustment
-
end
-
end
-
class LlmService
-
include Rails.application.routes.url_helpers
-
-
DEFAULT_MODEL = "gpt-4-turbo-preview"
-
DEFAULT_TEMPERATURE = 0.7
-
DEFAULT_MAX_TOKENS = 2000
-
-
# Model capabilities
-
JSON_CAPABLE_MODELS = %w[
-
gpt-4-turbo-preview gpt-4-1106-preview gpt-3.5-turbo-1106
-
claude-3-opus-20240229 claude-3-sonnet-20240229 claude-3-haiku-20240307
-
].freeze
-
-
# Provider-specific settings
-
PROVIDER_CONFIGS = {
-
openai: {
-
base_url: "https://api.openai.com",
-
models: /^(gpt|text-davinci|babbage|curie|ada)/,
-
json_mode: true
-
},
-
anthropic: {
-
base_url: "https://api.anthropic.com",
-
models: /^claude/,
-
json_mode: false # Claude doesn't have native JSON mode
-
},
-
cohere: {
-
base_url: "https://api.cohere.ai",
-
models: /^command/,
-
json_mode: false
-
},
-
huggingface: {
-
base_url: "https://api-inference.huggingface.co",
-
models: /^(meta-llama|mistral|falcon)/,
-
json_mode: false
-
}
-
}.freeze
-
-
def initialize(model: DEFAULT_MODEL, temperature: DEFAULT_TEMPERATURE)
-
@model = model
-
@temperature = temperature
-
@provider = detect_provider
-
@client = build_client
-
end
-
-
  # Sends a prompt to the configured LLM provider and returns the parsed
  # text content, or an error-info hash (from handle_api_error) on failure.
  #
  # options:
  #   :json_response  - append strict-JSON instructions to the prompt and
  #                     validate/extract JSON from the reply
  #   :max_tokens, :temperature, :system_message - forwarded to the
  #                     provider request body (see build_request_body)
  #
  # Rate-limit (429) responses are retried up to 3 times, honoring the
  # provider's retry-after hint or exponential backoff; other Faraday
  # errors are logged and converted to an error-info hash.
  def analyze(prompt, options = {})
    # Add JSON formatting instructions if requested
    formatted_prompt = if options[:json_response]
      ensure_json_response(prompt)
    else
      prompt
    end

    # Build request with retries for rate limits
    response = nil
    retries = 0
    max_retries = 3

    begin
      response = @client.post do |req|
        req.url completion_endpoint
        req.headers.merge!(provider_headers)
        # Body is pre-serialized; the JSON request middleware passes
        # String bodies through unchanged.
        req.body = build_request_body(formatted_prompt, options).to_json
      end

      parsed = parse_response(response)

      # If JSON was requested, validate and clean the response
      if options[:json_response]
        parsed = ensure_valid_json(parsed)
      end

      parsed
    rescue Faraday::TooManyRequestsError => e
      retries += 1
      if retries < max_retries
        wait_time = extract_retry_after(e) || (2 ** retries)
        Rails.logger.warn "Rate limited, waiting #{wait_time}s before retry #{retries}/#{max_retries}"
        sleep(wait_time)
        retry
      else
        handle_api_error(e)
      end
    rescue Faraday::Error => e
      Rails.logger.error "LLM API Error: #{e.message}"
      handle_api_error(e)
    end
  end
-
-
def ensure_json_response(prompt)
-
json_instruction = "\n\nIMPORTANT: You must respond with valid JSON only. Do not include any text before or after the JSON. Do not use markdown formatting. The response should be a raw JSON object that can be parsed directly."
-
-
# Add JSON schema hint if the prompt mentions a structure
-
if prompt.include?("JSON structure:")
-
prompt + json_instruction
-
else
-
prompt + "\n\nProvide your response as a valid JSON object." + json_instruction
-
end
-
end
-
-
def ensure_valid_json(response)
-
return nil if response.nil? || response.empty?
-
-
# Try to extract JSON from the response
-
json_match = response.match(/\{.*\}/m) || response.match(/\[.*\]/m)
-
-
if json_match
-
begin
-
JSON.parse(json_match[0])
-
json_match[0] # Return the matched JSON string
-
rescue JSON::ParserError => e
-
Rails.logger.error "Invalid JSON in LLM response: #{e.message}"
-
Rails.logger.debug "Attempted to parse: #{json_match[0][0..500]}..."
-
response # Return original response as fallback
-
end
-
else
-
Rails.logger.warn "No JSON found in LLM response"
-
response
-
end
-
end
-
-
def extract_retry_after(error)
-
# Extract retry-after header if available
-
if error.response && error.response[:headers]['retry-after']
-
error.response[:headers]['retry-after'].to_i
-
elsif error.response && error.response[:headers]['x-ratelimit-reset']
-
[error.response[:headers]['x-ratelimit-reset'].to_i - Time.now.to_i, 1].max
-
else
-
nil
-
end
-
end
-
-
def generate_suggestions(context, options = {})
-
prompt = build_suggestion_prompt(context)
-
analyze(prompt, options.merge(temperature: 0.8))
-
end
-
-
def validate_content(content, brand_guidelines, options = {})
-
prompt = build_validation_prompt(content, brand_guidelines)
-
analyze(prompt, options.merge(temperature: 0.3))
-
end
-
-
private
-
-
def detect_provider
-
PROVIDER_CONFIGS.find { |_, config| @model.match?(config[:models]) }&.first || :openai
-
end
-
-
def build_client
-
Faraday.new(url: api_base_url) do |faraday|
-
faraday.request :json
-
faraday.response :json
-
faraday.adapter Faraday.default_adapter
-
-
# Add retry logic for network errors
-
faraday.request :retry, {
-
max: 3,
-
interval: 0.5,
-
interval_randomness: 0.5,
-
backoff_factor: 2,
-
exceptions: [Faraday::ConnectionFailed, Faraday::TimeoutError]
-
}
-
-
# Add timeout settings
-
faraday.options.timeout = 120 # 2 minutes
-
faraday.options.open_timeout = 30
-
end
-
end
-
-
def provider_headers
-
headers = { 'Content-Type' => 'application/json' }
-
-
case @provider
-
when :openai
-
headers['Authorization'] = "Bearer #{api_key}"
-
when :anthropic
-
headers['x-api-key'] = api_key
-
headers['anthropic-version'] = '2023-06-01'
-
when :cohere
-
headers['Authorization'] = "Bearer #{api_key}"
-
when :huggingface
-
headers['Authorization'] = "Bearer #{api_key}"
-
else
-
headers['Authorization'] = "Bearer #{api_key}"
-
end
-
-
headers
-
end
-
-
def api_base_url
-
PROVIDER_CONFIGS[@provider][:base_url] || ENV['LLM_API_BASE_URL'] || "https://api.openai.com"
-
end
-
-
def api_key
-
case @provider
-
when :openai
-
ENV['OPENAI_API_KEY']
-
when :anthropic
-
ENV['ANTHROPIC_API_KEY']
-
when :cohere
-
ENV['COHERE_API_KEY']
-
when :huggingface
-
ENV['HUGGINGFACE_API_KEY']
-
else
-
ENV['LLM_API_KEY'] || ENV['OPENAI_API_KEY']
-
end
-
end
-
-
def completion_endpoint
-
case @provider
-
when :openai
-
"/v1/chat/completions"
-
when :anthropic
-
"/v1/messages"
-
when :cohere
-
"/v1/generate"
-
when :huggingface
-
"/models/#{@model}"
-
else
-
"/v1/chat/completions"
-
end
-
end
-
-
def build_request_body(prompt, options)
-
max_tokens = options[:max_tokens] || DEFAULT_MAX_TOKENS
-
temperature = options[:temperature] || @temperature
-
system_message = options[:system_message] || "You are a brand analysis and marketing expert. Provide detailed, actionable insights."
-
-
case @provider
-
when :openai
-
body = {
-
model: @model,
-
messages: [
-
{
-
role: "system",
-
content: system_message
-
},
-
{
-
role: "user",
-
content: prompt
-
}
-
],
-
temperature: temperature,
-
max_tokens: max_tokens
-
}
-
-
# Add JSON mode if supported and requested
-
if options[:json_response] && JSON_CAPABLE_MODELS.include?(@model)
-
body[:response_format] = { type: "json_object" }
-
end
-
-
body
-
when :anthropic
-
{
-
model: @model,
-
messages: [
-
{
-
role: "user",
-
content: "#{system_message}\n\n#{prompt}"
-
}
-
],
-
max_tokens: max_tokens,
-
temperature: temperature
-
}
-
when :cohere
-
{
-
model: @model,
-
prompt: "#{system_message}\n\n#{prompt}",
-
max_tokens: max_tokens,
-
temperature: temperature,
-
return_likelihoods: "NONE"
-
}
-
when :huggingface
-
{
-
inputs: prompt,
-
parameters: {
-
max_new_tokens: max_tokens,
-
temperature: temperature,
-
return_full_text: false
-
}
-
}
-
else
-
{
-
model: @model,
-
messages: [
-
{
-
role: "user",
-
content: prompt
-
}
-
],
-
temperature: temperature,
-
max_tokens: max_tokens
-
}
-
end
-
end
-
-
def parse_response(response)
-
return nil unless response.success?
-
-
case @provider
-
when :openai
-
response.body.dig("choices", 0, "message", "content")
-
when :anthropic
-
response.body.dig("content", 0, "text")
-
when :cohere
-
response.body.dig("generations", 0, "text") || response.body.dig("text")
-
when :huggingface
-
if response.body.is_a?(Array)
-
response.body.first["generated_text"]
-
else
-
response.body["generated_text"]
-
end
-
else
-
# Generic fallback
-
response.body.dig("choices", 0, "message", "content") ||
-
response.body.dig("content", 0, "text") ||
-
response.body.dig("generations", 0, "text") ||
-
response.body.dig("text") ||
-
response.body["generated_text"]
-
end
-
end
-
-
def handle_api_error(error)
-
error_info = case error
-
when Faraday::ResourceNotFound
-
{ error: "API endpoint not found", details: error.message, status: 404 }
-
when Faraday::UnauthorizedError
-
{ error: "Invalid API key", details: error.message, status: 401 }
-
when Faraday::TooManyRequestsError
-
{ error: "Rate limit exceeded", details: error.message, status: 429 }
-
when Faraday::BadRequestError
-
{ error: "Invalid request", details: parse_error_details(error), status: 400 }
-
when Faraday::ServerError
-
{ error: "Server error", details: error.message, status: 500 }
-
when Faraday::TimeoutError
-
{ error: "Request timeout", details: "The request took too long to complete", status: 408 }
-
else
-
{ error: "API request failed", details: error.message, status: 0 }
-
end
-
-
Rails.logger.error "LLM API Error: #{error_info[:error]} - #{error_info[:details]}"
-
error_info
-
end
-
-
def parse_error_details(error)
-
if error.response && error.response[:body]
-
body = error.response[:body]
-
-
if body.is_a?(Hash)
-
body['error']&.[]('message') || body['message'] || error.message
-
else
-
error.message
-
end
-
else
-
error.message
-
end
-
end
-
-
def build_suggestion_prompt(context)
-
<<~PROMPT
-
Based on the following context, generate content suggestions:
-
-
Brand: #{context[:brand_name]}
-
Content Type: #{context[:content_type]}
-
Campaign Goal: #{context[:campaign_goal]}
-
Target Audience: #{context[:target_audience]}
-
-
Brand Guidelines Summary:
-
#{context[:guidelines_summary]}
-
-
Please provide 3-5 specific content suggestions that align with the brand voice and campaign objectives.
-
Include for each suggestion:
-
1. Content idea/topic
-
2. Key messaging points
-
3. Recommended format/channel
-
4. Expected outcome
-
-
Format as JSON.
-
PROMPT
-
end
-
-
# Builds the LLM prompt that scores `content` against `brand_guidelines`.
# The model is asked for a 0-100 compliance score plus feedback, as JSON.
def build_validation_prompt(content, brand_guidelines)
  <<~VALIDATION_PROMPT
    Validate the following content against brand guidelines:

    Content:
    #{content}

    Brand Guidelines:
    #{brand_guidelines}

    Please analyze:
    1. Brand voice compliance
    2. Messaging alignment
    3. Tone consistency
    4. Guideline violations
    5. Improvement suggestions

    Provide a compliance score (0-100) and detailed feedback.
    Format as JSON.
  VALIDATION_PROMPT
end
-
end
-
2
# Examines Activity records for patterns that suggest abuse (scraping,
# credential stuffing, endpoint probing). Used two ways:
#   - per-request: SuspiciousActivityDetector.new(activity).check
#   - recurring batch job: SuspiciousActivityDetector.scan_all_users
class SuspiciousActivityDetector
  attr_reader :activity

  # Real-time detection thresholds used by the per-activity #check path.
  SUSPICIOUS_PATTERNS = {
    rapid_requests: {
      threshold: 100, # requests
      window: 60 # seconds
    },
    failed_logins: {
      threshold: 5, # attempts
      window: 300 # 5 minutes
    },
    unusual_hour_activity: {
      start_hour: 2, # 2 AM
      end_hour: 5 # 5 AM
    },
    ip_hopping: {
      threshold: 3, # different IPs
      window: 300 # 5 minutes
    },
    excessive_errors: {
      threshold: 10, # 4xx/5xx errors
      window: 300 # 5 minutes
    }
  }.freeze

  # Looser thresholds for the hourly batch scan (1-hour lookback). These were
  # previously hard-coded inline in .scan_all_users; kept at the same values.
  SCAN_THRESHOLDS = {
    rapid_requests: 200,    # requests in the last hour
    ip_hopping: 5,          # distinct IP addresses in the last hour
    excessive_errors: 20,   # failed (4xx/5xx) requests in the last hour
    multiple_suspicious: 3  # activities already flagged suspicious
  }.freeze

  # Client strings commonly sent by bots and scripted tooling.
  # NOTE(review): /java/i also matches "JavaScript" UAs — presumably acceptable
  # for this heuristic, but worth confirming.
  SUSPICIOUS_USER_AGENT_PATTERNS = [
    /bot/i,
    /crawler/i,
    /spider/i,
    /scraper/i,
    /curl/i,
    /wget/i,
    /python/i,
    /java/i,
    /ruby/i
  ].freeze

  # Request paths typical of vulnerability scanners and credential probes.
  SUSPICIOUS_PATH_PATTERNS = [
    /\.env/i,
    /config\//i,
    /admin/i,
    /wp-admin/i,
    /phpmyadmin/i,
    /\.git/i,
    /\.svn/i,
    /backup/i,
    /sql/i,
    /database/i
  ].freeze

  # Class method for recurring job to scan all users.
  # Scans every user's last hour of activity, logs a security event and
  # (optionally) emails admins when anything matched.
  # Returns the findings: [{ user:, patterns:, activity_count: }, ...].
  def self.scan_all_users
    Rails.logger.info "Starting security scan for all users..."
    suspicious_users = []

    User.find_each do |user|
      # Check recent activities
      recent_activities = user.activities.where("occurred_at > ?", 1.hour.ago)
      next if recent_activities.empty?

      suspicious_patterns = scan_patterns_for(recent_activities)
      next if suspicious_patterns.empty?

      suspicious_users << {
        user: user,
        patterns: suspicious_patterns,
        activity_count: recent_activities.count
      }
    end

    report_scan_findings(suspicious_users) if suspicious_users.any?

    Rails.logger.info "Security scan completed. Found #{suspicious_users.count} suspicious users."
    suspicious_users
  end

  # Evaluates one user's recent activities against SCAN_THRESHOLDS.
  # Returns the matched patterns as [{ pattern:, value:, threshold: }, ...].
  def self.scan_patterns_for(recent_activities)
    patterns = []

    # Rapid requests
    request_count = recent_activities.count
    if request_count > SCAN_THRESHOLDS[:rapid_requests]
      patterns << {
        pattern: 'rapid_requests',
        value: request_count,
        threshold: SCAN_THRESHOLDS[:rapid_requests]
      }
    end

    # Multiple IPs
    ip_count = recent_activities.distinct.count(:ip_address)
    if ip_count > SCAN_THRESHOLDS[:ip_hopping]
      patterns << {
        pattern: 'ip_hopping',
        value: ip_count,
        threshold: SCAN_THRESHOLDS[:ip_hopping]
      }
    end

    # Failed requests
    failed_count = recent_activities.failed_requests.count
    if failed_count > SCAN_THRESHOLDS[:excessive_errors]
      patterns << {
        pattern: 'excessive_errors',
        value: failed_count,
        threshold: SCAN_THRESHOLDS[:excessive_errors]
      }
    end

    # Activities already flagged suspicious by the real-time path
    suspicious_count = recent_activities.suspicious.count
    if suspicious_count > SCAN_THRESHOLDS[:multiple_suspicious]
      patterns << {
        pattern: 'multiple_suspicious',
        value: suspicious_count,
        threshold: SCAN_THRESHOLDS[:multiple_suspicious]
      }
    end

    patterns
  end
  private_class_method :scan_patterns_for

  # Logs a security event for the scan findings and emails admins when
  # alerting is enabled in the application configuration.
  def self.report_scan_findings(suspicious_users)
    ActivityLogger.security('security_scan_alert', "Security scan detected suspicious users", {
      user_count: suspicious_users.count,
      details: suspicious_users.map { |s|
        {
          user_id: s[:user].id,
          email: s[:user].email_address,
          patterns: s[:patterns].map { |p| p[:pattern] }
        }
      }
    })

    # Send alerts if configured
    if Rails.application.config.activity_alerts.enabled
      AdminMailer.security_scan_alert(suspicious_users).deliver_later
    end
  end
  private_class_method :report_scan_findings

  # @param activity [Activity] the request activity record to evaluate
  def initialize(activity)
    @activity = activity
  end

  # Runs every real-time heuristic against the wrapped activity. When any
  # pattern matches, the activity is marked suspicious (persisted) and an
  # alert is triggered. Returns true if at least one pattern matched.
  def check
    suspicious_reasons = []

    suspicious_reasons << "rapid_requests" if rapid_requests?
    suspicious_reasons << "failed_login_attempts" if failed_login_attempts?
    suspicious_reasons << "unusual_hour_activity" if unusual_hour_activity?
    suspicious_reasons << "ip_hopping" if ip_hopping?
    suspicious_reasons << "excessive_errors" if excessive_errors?
    suspicious_reasons << "suspicious_user_agent" if suspicious_user_agent?
    suspicious_reasons << "suspicious_path" if suspicious_path?

    if suspicious_reasons.any?
      mark_as_suspicious(suspicious_reasons)
      trigger_alert(suspicious_reasons)
    end

    suspicious_reasons.any?
  end

  private

  # Strictly more than the threshold number of requests by this user inside
  # the rolling window (the current activity counts toward the total).
  def rapid_requests?
    threshold = SUSPICIOUS_PATTERNS[:rapid_requests][:threshold]
    window = SUSPICIOUS_PATTERNS[:rapid_requests][:window]

    recent_count = Activity
      .by_user(activity.user)
      .where("occurred_at > ?", window.seconds.ago)
      .count

    recent_count > threshold
  end

  # At least `threshold` failed sessions#create attempts inside the window.
  # Only evaluated when the current activity is itself a failed login.
  def failed_login_attempts?
    return false unless activity.controller == "sessions" && activity.action == "create" && activity.failed?

    threshold = SUSPICIOUS_PATTERNS[:failed_logins][:threshold]
    window = SUSPICIOUS_PATTERNS[:failed_logins][:window]

    failed_count = Activity
      .by_user(activity.user)
      .by_controller("sessions")
      .by_action("create")
      .failed_requests
      .where("occurred_at > ?", window.seconds.ago)
      .count

    failed_count >= threshold
  end

  # Activity occurred between start_hour and end_hour inclusive (2-5 AM,
  # in the occurred_at timestamp's own zone).
  def unusual_hour_activity?
    hour = activity.occurred_at.hour
    start_hour = SUSPICIOUS_PATTERNS[:unusual_hour_activity][:start_hour]
    end_hour = SUSPICIOUS_PATTERNS[:unusual_hour_activity][:end_hour]

    hour >= start_hour && hour <= end_hour
  end

  # At least `threshold` distinct (non-nil) IP addresses used inside the window.
  def ip_hopping?
    threshold = SUSPICIOUS_PATTERNS[:ip_hopping][:threshold]
    window = SUSPICIOUS_PATTERNS[:ip_hopping][:window]

    unique_ips = Activity
      .by_user(activity.user)
      .where("occurred_at > ?", window.seconds.ago)
      .distinct
      .pluck(:ip_address)
      .compact
      .size

    unique_ips >= threshold
  end

  # At least `threshold` failed (4xx/5xx) requests inside the window.
  def excessive_errors?
    threshold = SUSPICIOUS_PATTERNS[:excessive_errors][:threshold]
    window = SUSPICIOUS_PATTERNS[:excessive_errors][:window]

    error_count = Activity
      .by_user(activity.user)
      .failed_requests
      .where("occurred_at > ?", window.seconds.ago)
      .count

    error_count >= threshold
  end

  # User agent matches one of the known bot/tooling patterns.
  def suspicious_user_agent?
    return false unless activity.user_agent

    SUSPICIOUS_USER_AGENT_PATTERNS.any? { |pattern| activity.user_agent.match?(pattern) }
  end

  # Request path matches a known scanner/probe pattern.
  def suspicious_path?
    return false unless activity.request_path

    # Skip if the user is actually an admin accessing admin paths
    return false if activity.user.admin? && activity.request_path.match?(/admin/i)

    SUSPICIOUS_PATH_PATTERNS.any? { |pattern| activity.request_path.match?(pattern) }
  end

  # Persists the suspicious flag and records the matched reasons in metadata.
  def mark_as_suspicious(reasons)
    metadata = activity.metadata || {}
    metadata["suspicious_reasons"] = reasons

    activity.update!(
      suspicious: true,
      metadata: metadata
    )
  end

  # In production, this would send notifications to admins.
  def trigger_alert(reasons)
    Rails.logger.warn "Suspicious activity detected for user #{activity.user.email_address}: #{reasons.join(', ')}"

    # Queue alert job if configured
    if defined?(SuspiciousActivityAlertJob)
      SuspiciousActivityAlertJob.perform_later(activity.id, reasons)
    end
  end
end